mirror of https://github.com/rclone/rclone.git synced 2026-01-22 12:23:15 +00:00

Compare commits

701 Commits

Author SHA1 Message Date
Nick Craig-Wood
2769321555 crypt: add --crypt-pass-corrupted-blocks flag 2019-02-02 10:55:36 +00:00
Nick Craig-Wood
c496efe9a4 Add Wojciech Smigielski to contributors 2019-02-01 17:12:43 +00:00
Nick Craig-Wood
cf583e0237 Add Rémy Léone to contributors 2019-02-01 17:12:43 +00:00
Wojciech Smigielski
f09d0f5fef b2: added disable sha1sum flag 2019-02-01 17:12:24 +00:00
Rémy Léone
1e6cbaa355 s3: Add Scaleway to s3 documentation 2019-02-01 17:09:57 +00:00
Nick Craig-Wood
be643ecfbc sftp: don't error on dangling symlinks 2019-02-01 16:43:26 +00:00
Nick Craig-Wood
0c4ed35b9b build: improve beta tidy script 2019-02-01 16:40:55 +00:00
Nick Craig-Wood
4e4feebf0a drive: fix google docs in rclone mount in some circumstances #1732
Before this change any attempt to access a google doc in an rclone
mount would give the error "partial downloads are not supported while
exporting Google Documents" as the mount uses ranged requests to read
data.

This implements ranged requests for a limited number of scenarios,
just enough so that Google docs can be cat-ed from an rclone mount.
When they are cat-ed they also receive their correct size.
2019-01-31 10:39:13 +00:00
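As an illustration of the behaviour this enables, a Google Doc can now be read through a mount with a plain cat; the remote name and document path below are hypothetical:

    rclone mount drive: /mnt/drive &
    cat "/mnt/drive/My Report.docx"   # exported Google Doc, now readable over the mount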
Sebastian Bünger
291f270904 jottacloud: add support for 2-factor authentication fixes #2722 2019-01-30 08:13:46 +00:00
Nick Craig-Wood
f799be1d6a local: fix symlink tests under Windows 2019-01-29 15:40:49 +00:00
Nick Craig-Wood
74297a0c55 local: make sure we close file handle in local tests
...as Windows can't remove a directory with an open file handle in it.
2019-01-29 15:23:42 +00:00
Nick Craig-Wood
7e13103ba2 Add kayrus to contributors 2019-01-29 14:43:25 +00:00
kayrus
34baf05d9d Swift: introduce application credential auth support 2019-01-29 14:43:10 +00:00
kayrus
38c0018906 Bump github.com/ncw/swift to v1.0.44 2019-01-29 14:43:10 +00:00
Nick Craig-Wood
6f25e48cbb ftp: fix docs to note ftp_proxy isn't supported 2019-01-29 14:38:26 +00:00
Nick Craig-Wood
7e99abb5da Add Matt Robinson to contributors 2019-01-29 14:38:26 +00:00
Nick Craig-Wood
629019c3e4 Add yair@unicorn to contributors 2019-01-29 14:38:26 +00:00
Matt Robinson
1402fcb234 fix typo in rcd docs 2019-01-29 14:37:58 +00:00
Nick Craig-Wood
b26276b416 union: fix poll-interval not working - fixes #2837
Before this change the union remote was using whether the writable
union could poll for changes to decide whether the union mount could
poll for changes.

The fix causes the union backend to signal it can poll for changes if
**any** of the remotes can poll for changes.
2019-01-28 14:43:12 +00:00
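A sketch of the setup this fixes, assuming a union remote configured in the style of this era; the `remotes` key and remote names are assumptions, not taken from the commit:

    [combined]
    type = union
    remotes = archive: fastssd:

    rclone mount combined: /mnt/combined --poll-interval 1m   # polling now works if any upstream supports it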
Nick Craig-Wood
e317f04098 local: make using -l/--links with -L/--copy-links throw an error #1152 2019-01-28 13:47:27 +00:00
Nick Craig-Wood
65ff330602 local: add tests for -l feature #1152 2019-01-28 13:47:27 +00:00
Nick Craig-Wood
52763e1918 local: when using -l fix setting modification times of symlinks #1152
Before this change it was setting the modification times of the things
that the symlinks pointed to.

Note that this is only implemented for unix style OSes.  Other OSes
will not attempt to set the modification time of a symlink.
2019-01-28 13:47:27 +00:00
yair@unicorn
23e06cedbd local: Add support for '-l' (symbolic link translation) #1152 2019-01-28 13:47:27 +00:00
yair@unicorn
b369fcde28 local: add documentation for -l option #1152 2019-01-28 13:47:27 +00:00
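For illustration, the new flag in use; the paths and remote name are hypothetical:

    rclone sync -l /home/user/data remote:backup   # symlinks are translated rather than followed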
Nick Craig-Wood
c294068780 webdav: support About - fixes #2937
This means that `rclone about` will work with Webdav backends and `df`
will be correct when using them in `rclone mount`.
2019-01-28 13:45:09 +00:00
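For example, with About supported the usage of a WebDAV remote can be queried directly (remote name hypothetical):

    rclone about nextcloud: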
Nick Craig-Wood
8a774a3dd4 webdav: support MD5 and SHA1 hashes with Owncloud and Nextcloud - fixes #2379 2019-01-28 13:45:09 +00:00
Nick Craig-Wood
53a8b5a275 vfs: Implement renaming of directories for backends without DirMove #2539
Previously to this change, backends without the optional interface
DirMove could not rename directories.

This change uses the new operations.DirMove call to implement renaming
directories which will fall back to Move/Copy as necessary.
2019-01-27 21:26:56 +00:00
Nick Craig-Wood
bbd03f49a4 operations: Implement DirMove for moving a directory #2539
This does the equivalent of sync.Move but is specialised for moving
files in one backend.
2019-01-27 21:26:56 +00:00
Nick Craig-Wood
e31578e03c s3: Auto detect region for buckets on operation failure - fixes #2915
If an incorrect region error is returned while using a bucket then the
region is updated, the session is remade and the operation is retried.
2019-01-27 21:22:49 +00:00
Nick Craig-Wood
0855608bc1 drive: add --drive-pacer-burst config to control bursting of the rate limiter 2019-01-27 21:19:20 +00:00
Nick Craig-Wood
f8dbf8292a pacer: control the minSleep with a rate limiter and allow burst
This will mean rclone tracks the minimum sleep values more precisely
when it isn't rate limiting.

Allowing burst is good for some backends (eg Google Drive).
2019-01-27 21:19:20 +00:00
Nick Craig-Wood
144daec800 drive: set default pacer to 100ms for 10 tps - fixes #2880 2019-01-27 21:19:20 +00:00
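A hedged example of tuning the pacer with the flags from the commits above; the values are illustrative only and the source/destination paths are hypothetical:

    rclone copy drive:src /tmp/dst --drive-pacer-min-sleep 100ms --drive-pacer-burst 100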
Nick Craig-Wood
6a832b7173 qingstor: default --qingstor-upload-concurrency to 1 to work around bug
If the upload concurrency is set > 1 then the hash becomes corrupted.
The upload is fine, and can be downloaded fine, however the hash is no
longer the md5sum of the object.  It is not known whether this is
rclone's fault or a bug at QingStor.
2019-01-27 21:09:11 +00:00
Nick Craig-Wood
184a9c8da6 mountlib: clip blocks returned to 32 bit number for Windows 32 bit - fixes #2934 2019-01-27 12:04:56 +00:00
Sebastian Bünger
88592a1779 jottacloud: Use token auth for all API requests
Don't store password anymore
2019-01-27 11:32:11 +00:00
Sebastian Bünger
92fa30a787 jottacloud: resume/deduplication fixups 2019-01-27 11:32:11 +00:00
Oliver Heyme
e4dfe78ef0 jottacloud: resume and deduplication support 2019-01-27 11:32:11 +00:00
Nick Craig-Wood
ba84eecd94 build: don't attempt to upload artifacts for pull requests on circleci 2019-01-25 17:27:02 +00:00
Cnly
ea12d76c03 onedrive: fix root ID not normalised #2930 2019-01-24 19:59:23 +08:00
Nick Craig-Wood
5f0a8a4e28 rest: fix upload of 0 length files
Before this change if ContentLength was set in the options but 0 then
we would upload using chunked encoding.  Fix this to always upload
with a "Content-Length" header even if the size is 0.

Remove workarounds for this from b2 and onedrive backends.

This fixes the issue for the webdav backend described here:

https://forum.rclone.org/t/code-500-errors-with-webdav-nextcloud/8440/
2019-01-24 11:38:00 +00:00
Nick Craig-Wood
2fc095cd3e azureblob: Stop Mkdir attempting to create existing containers
Before this change azureblob would attempt to create already existing
containers.  This causes problems with limited permissions keys.

This change checks the container exists before trying to create it in
the same way the s3 backend does.  This uses no more requests in the
usual case of the container existing.

See: https://forum.rclone.org/t/copying-individual-files-to-azure-blob-storage/8397
2019-01-23 09:58:46 +00:00
Nick Craig-Wood
a2341cc412 qingstor: add upload chunk size/concurrency/cutoff control #2851
* --upload-chunk-size
* --upload-concurrency
* --upload-cutoff
2019-01-18 15:20:20 +00:00
Nick Craig-Wood
9685be64cd qingstor: fix go routine leak on multipart upload errors - fixes #2851 2019-01-18 15:20:20 +00:00
Nick Craig-Wood
39f5059d48 s3: add --s3-bucket-acl to control bucket ACL - fixes #2918
Before this change buckets were created with the same ACL as objects.

After this change, the user can set just --s3-acl to set the ACL of
buckets and objects, or use --s3-bucket-acl as well to have a
different ACL used for bucket creation.

This also logs at INFO level the creation and deletion of buckets.
2019-01-18 15:12:11 +00:00
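For example, a bucket can now be created with one ACL while objects get another; the bucket name and ACL values are illustrative:

    rclone mkdir s3:my-bucket --s3-bucket-acl private
    rclone copy /data s3:my-bucket --s3-acl bucket-owner-full-control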
Nick Craig-Wood
a30e80564d config: when using auto confirm make user interaction configurable
* drive: don't run teamdrive config if auto confirm set
* onedrive: don't run extra config if auto confirm set
* make Confirm results customisable by config

Fixes #1010
2019-01-18 14:46:26 +00:00
Nick Craig-Wood
8e107b9657 build: update the build container to use latest go version for circleci 2019-01-18 13:26:27 +00:00
Nick Craig-Wood
21a0693b79 build: upload circleci builds for the beta release latest too 2019-01-17 15:18:03 +00:00
Nick Craig-Wood
4846d9393d ftp: wait for 60 seconds for connection Close then declare it dead
This helps with indefinite hangs when transferring very large files on
some ftp server.

Fixes #2912
2019-01-15 17:32:14 +00:00
Nick Craig-Wood
fc4f20d52f build: upload circleci builds for the beta release 2019-01-15 12:18:50 +00:00
Onno Zweers
60558b5d37 webdav: update docs about dcache and macaroons
Added link to dcache.org

Updated link to macaroon script to new location
2019-01-15 09:21:34 +00:00
Nick Craig-Wood
5990573ccd accounting: fix layout of stats - fixes #2910
This fixes several things wrong with the layout of the stats.

Transfers which haven't started are printed in the same format as
those which have so the stats with `--progress` don't show horrible
artifacts.

Checkers and transfers now get a ": checkers" and ": transfers" label
on the end of the stats line.  Transfers will have the transfer stats
when the transfer has started instead of this.

There was a bug in the routine which shortened the file names (it
always produced strings 1 character too long).  This is now fixed with a test.

The formatting string was wrong with a fixed width of 45 - this is now
replaced with the value of `--stats-file-name-length`.

This also meant that there were unnecessary leading spaces in the file
names.  So the default `--stats-file-name-length` was raised to 45
from 40.
2019-01-14 16:12:39 +00:00
Nick Craig-Wood
bd11d3cb62 vfs: Fix panic on rename with --dry-run set - fixes #2911 2019-01-14 12:07:25 +00:00
Nick Craig-Wood
5e5578d2c3 docs: rclone config file instead of rclone -h to find config file 2019-01-13 17:56:57 +00:00
Nick Craig-Wood
1318c6aec8 s3: Add Alibaba OSS to integration tests and fix storage classes 2019-01-12 20:41:47 +00:00
Nick Craig-Wood
f29757de3b test_all: make a way of ignoring integration test failures
Use this to ignore known failures
2019-01-12 20:18:05 +00:00
Nick Craig-Wood
f397c35935 fstest/test_all: add alternate s3 and swift providers to the integration tests 2019-01-12 18:33:31 +00:00
Nick Craig-Wood
f365230aea doc: Add more info on testing to CONTRIBUTING 2019-01-12 18:28:51 +00:00
Nick Craig-Wood
ff0b8e10af s3: Support Alibaba Cloud (Aliyun) OSS
The existing s3 backend passed all integration tests with OSS provided
`force_path_style = false`.

This makes sure that is so and adds documentation and configuration
for OSS.

Thanks to @luolibin for their work on the OSS backend which we ended
up not needing.

Fixes #1641
Fixes #1237
2019-01-12 17:28:04 +00:00
Nick Craig-Wood
8d16a5693c vendor: update github.com/goftp/server - fixes #2845 2019-01-12 17:09:11 +00:00
Nick Craig-Wood
781142a73f Add qip to contributors 2019-01-11 17:35:47 +00:00
qip
f471a7e3f5 fshttp: Add cookie support with cmdline switch --use-cookies
Cookies are handled by cookiejar in memory with fshttp module through
the entire session.

One useful scenario is, with HTTP storage system where index server
adds authentication cookie while redirecting to CDN for actual files.

Also, it can be helpful to reuse fshttp in other storage systems
requiring cookie.
2019-01-11 17:35:29 +00:00
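A minimal sketch of the scenario described above, assuming `mysite:` is a configured HTTP remote whose index server sets an auth cookie before redirecting to the CDN:

    rclone copy mysite:files /tmp/files --use-cookies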
Nick Craig-Wood
d7a1fd2a6b Add Dario Guzik to contributors 2019-01-11 14:14:12 +00:00
Dario Guzik
7782eda88e check: Add stats showing total files matched. 2019-01-11 14:13:48 +00:00
Nick Craig-Wood
d08453d402 local: fix renaming/deleting open files on Windows #2730
This uses the lib/file package to open files in such a way open files
can be renamed or deleted even under Windows.
2019-01-11 10:26:34 +00:00
Nick Craig-Wood
71e98ea584 vfs: fix renaming/deleting open files with cache mode "writes" under Windows
Before this change, renaming and deleting of open files (which can
easily happen due to the asynchronous nature of file systems) would
produce an error, for example saving files with Firefox.

After this change we open files with the flags necessary for open
files to be renamed or deleted.

Fixes #2730
2019-01-11 10:26:34 +00:00
Nick Craig-Wood
42d997f639 lib/file: reimplement os.OpenFile allowing rename/delete open files under Windows
Normally os.OpenFile under Windows does not allow renaming or deleting
open file handles.  This package provides equivalents for os.OpenFile,
os.Open and os.Create which do allow that.
2019-01-11 10:26:34 +00:00
Nick Craig-Wood
571b4c060b mount: check that mountpoint and local directory to mount don't overlap
If the mountpoint and the directory to mount overlap this causes a
lockup.

Fixes #2905
2019-01-10 14:18:00 +00:00
Nick Craig-Wood
ff72059a94 operations: warn if --checksum is set but there are no hashes available
Also caveat the help of --checksum

Fixes #2903
2019-01-10 11:07:10 +00:00
Nick Craig-Wood
2e6ef4f6ec test_all: fix run with -remotes that aren't in the config file 2019-01-10 10:59:32 +00:00
Nick Craig-Wood
0ec6dd9f4b Add nicolov to contributors 2019-01-09 19:29:26 +00:00
nicolov
0b7fdf16a2 serve: add dlna server 2019-01-09 19:14:14 +00:00
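For example, any remote can now be exposed to DLNA players on the local network (remote name hypothetical):

    rclone serve dlna remote:media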
nicolov
5edfd31a6d vendor: add github.com/anacrolix/dms 2019-01-09 19:14:14 +00:00
Nick Craig-Wood
7ee7bc87ae vfs: fix tests after --dir-perms changes
This was introduced in 554ee0d963
2019-01-09 09:49:34 +00:00
Fabian Möller
1433558c01 sftp: perform environment variable expansion on key-file 2019-01-09 10:11:33 +01:00
Fabian Möller
0458b961c5 sftp: add option to force the usage of an ssh-agent
Also adds the possibility to specify a specific key to request from the
ssh-agent.
2019-01-09 10:11:33 +01:00
Fabian Möller
c1998c4efe sftp: add support for PEM encrypted private keys 2019-01-09 10:11:33 +01:00
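A sketch of an sftp remote using these features: environment variable expansion in the key file path and forcing use of the ssh-agent. The option names key_file and key_use_agent are assumed from the commit descriptions; host and user are hypothetical:

    [myserver]
    type = sftp
    host = sftp.example.com
    user = backup
    key_file = ${HOME}/.ssh/id_rsa
    key_use_agent = true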
Alex Chen
49da220b65 onedrive: fix broken support for "shared with me" folders - fixes #2536, #2778 (#2876) 2019-01-09 13:11:00 +08:00
Nick Craig-Wood
554ee0d963 vfs: add --dir-perms and --file-perms flags - fixes #2897
This allows files to be shown with the execute bit which allows
binaries to be run under Windows and Linux.
2019-01-08 17:29:38 +00:00
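For example, to expose files with the execute bit set on a mount (mode values illustrative):

    rclone mount remote: /mnt/remote --dir-perms 0755 --file-perms 0755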
Denis Skovpen
2d2533a08a cmd/copyurl: fix checking of --dry-run 2019-01-08 11:28:05 +00:00
Nick Craig-Wood
733b072d4f azureblob: ignore directory markers in initial Fs creation too - fixes #2806
This is a follow-up to feea0532 including the initial Fs creation
where the backend detects whether the path is pointing to a file or a
directory.
2019-01-08 11:21:20 +00:00
Nick Craig-Wood
2d01a65e36 oauthutil: read a fresh token config file before using the refresh token.
This means that rclone will pick up tokens from concurrently running
rclones.  This helps for Box which only allows each refresh token to
be used once.

Without this fix, rclone caches the refresh token at the start of the
run, then when the token expires the refresh token may have been used
already by a concurrently running rclone.

This also will retry the oauth up to 5 times at 1 second intervals.

See: https://forum.rclone.org/t/box-token-refresh-timing/8175
2019-01-08 11:01:30 +00:00
Nick Craig-Wood
b8280521a5 drive: supply correct scopes when using --drive-impersonate
This fixes using --drive-impersonate and appfolders.
2019-01-07 11:50:05 +00:00
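For illustration, impersonation is passed on the command line; the remote name and account shown are hypothetical:

    rclone lsd gdrive: --drive-impersonate user@example.com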
Nick Craig-Wood
60e6af2605 Add andrea rota to contributors 2019-01-05 20:56:03 +00:00
andrea rota
9d16822c63 note on minimum version with support for b2 multi application keys
This trivial patch adds a note about the minimum version of rclone
needed in order to be able to use multiple application keys with the b2
backend.

As Debian stable (amongst other distros) is shipping an older version,
users running rclone < 1.43 and reading about this feature in the online
docs may struggle to realise why they are not able to sync to b2 when
configured to use an application key other than the master one.

For reference: https://github.com/ncw/rclone/issues/2513
2019-01-04 11:22:20 +00:00
Cnly
38a0946071 docs: update OneDrive limitations and versioning issue 2019-01-04 16:42:19 +08:00
Nick Craig-Wood
95e52e1ac3 cmd: improve error reporting for too many/few arguments - fixes #2860
Improve docs on the different kind of flag passing.
2018-12-29 17:40:21 +00:00
Nick Craig-Wood
51ab1c940a Add Sebastian Bünger - @buengese to the MAINTAINERS list
Also add areas of specific responsibility
2018-12-29 16:08:40 +00:00
Nick Craig-Wood
6f30427357 yandex: note --timeout increase needed for large files
See: https://forum.rclone.org/t/rclone-stucks-at-the-end-of-a-big-file-upload/8102
2018-12-29 16:08:40 +00:00
Cnly
3220acc729 fstests: fix TestFsName fails when using remote:with/path 2018-12-29 09:34:04 +00:00
Nick Craig-Wood
3c97933416 oauthutil: suppress ERROR message when doing remote config
Before this change doing a remote config using rclone authorize gave
this error.  The token is saved a bit later anyway so the error is
needlessly confusing.

    ERROR : Failed to save new token in config file: section 'remote' not found.

This commit suppresses that error.

https://forum.rclone.org/t/onedrive-for-business-failed-to-save-token/8061
2018-12-28 09:53:53 +00:00
Nick Craig-Wood
039e2a9649 vendor: pull in github.com/ncw/swift latest to fix reauth on big files 2018-12-28 09:23:57 +00:00
Nick Craig-Wood
1c01d0b84a vendor: update dropbox SDK to fix failing integration tests #2829 2018-12-26 15:17:03 +00:00
Nick Craig-Wood
39eac7a765 Add Jay to contributors 2018-12-26 15:08:09 +00:00
Jay
082a7065b1 Use vfsgen for static HTML templates 2018-12-26 15:07:21 +00:00
Jay
f7b08a6982 vendor: add github.com/shurcooL/vfsgen 2018-12-26 15:07:21 +00:00
Nick Craig-Wood
37e32d8c80 Add Arkadius Stefanski to contributors 2018-12-26 15:03:27 +00:00
Arkadius Stefanski
f2a1b991de readme: fix copying link 2018-12-26 15:03:08 +00:00
Nick Craig-Wood
4128e696d6 Add François Leurent to contributors
New email for Animosity022
2018-12-26 15:00:16 +00:00
François Leurent
7e7f3de355 qingcloud: fix typos in trace messages 2018-12-26 14:51:48 +00:00
Nick Craig-Wood
1f6a1cd26d vfs: add test_vfs code for hunting for deadlocks 2018-12-26 09:08:27 +00:00
Nick Craig-Wood
2cfe2354df vfs: fix deadlock between RWFileHandle.close and File.Remove - fixes #2857
Before this change we took the locks file.mu and file.muRW in an
inconsistent order - after the change we always take them in the same
order to fix the deadlock.
2018-12-26 09:08:27 +00:00
Nick Craig-Wood
13387c0838 vfs: fix deadlock on concurrent operations on a directory - fixes #2811
Before this fix there were two paths where concurrent use of a
directory could take the file lock then directory lock and the other
would take the locks in the reverse order.

Fix this by narrowing the locking windows so the file lock and
directory lock don't overlap.
2018-12-26 09:08:27 +00:00
Animosity022
5babf2dc5c Update drive.md
Fixed the Google Drive API documentation
2018-12-22 18:37:00 +00:00
Nick Craig-Wood
9012d7c6c1 cmd: fix --progress crash under Windows Jenkins - fixes #2846 2018-12-22 18:05:13 +00:00
Nick Craig-Wood
df1faa9a8f webdav: fail soft on time parsing errors
The time format provided by webdav servers seems to vary wildly from
that specified in the RFC - rclone already parses times in 5 different
formats!

If an unparseable time is found, then fail softly logging an ERROR
(just once) but returning the epoch.

This will mean that webdav servers with bad time formats will still be
usable by rclone.
2018-12-20 12:10:15 +00:00
Nick Craig-Wood
3de7ad5223 b2: for a bucket limited application key check the bucket name
Before this fix rclone would just use the authorised bucket regardless
of what bucket you put on the command line.

This uses the new `bucketName` response in the API and checks that the
user is using the correct bucket name to avoid accidents.

Fixes #2839
2018-12-20 12:07:35 +00:00
Garry McNulty
9cb3a68c38 crypt: check for maximum length before decrypting filename
The EME Transform() method will panic if the input data is larger than
2048 bytes.

Fixes #2826
2018-12-19 11:51:44 +00:00
Nick Craig-Wood
c1dd76788d httplib: make http serving with auth generate INFO messages on auth fail
2018/12/13 12:13:44 INFO  : /: 127.0.0.1:39696: Basic auth challenge sent
2018/12/13 12:13:54 INFO  : /: 127.0.0.1:40050: Unauthorized request from ncw

Fixes #2834
2018-12-14 13:38:49 +00:00
Nick Craig-Wood
5ee1816a71 filter: parallelise reading of --files-from - fixes #2835
Before this change rclone would read the list of files from the
files-from parameter and check they existed one at a time.  This could
take a very long time for lots of files.

After this change, rclone will check up to --checkers in parallel.
2018-12-13 13:22:30 +00:00
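For example, a long file list is now checked with up to --checkers parallel lookups (paths illustrative):

    rclone copy remote:src /local/dst --files-from file-list.txt --checkers 16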
Nick Craig-Wood
63b51c6742 vendor: add golang.org/x/sync as a dependency 2018-12-13 10:45:52 +00:00
Nick Craig-Wood
e7684b7ed5 Add William Cocker to contributors 2018-12-06 21:53:53 +00:00
William Cocker
dda23baf42 s3: update doc for Glacier storage class
Related to #923
2018-12-06 21:53:38 +00:00
William Cocker
8575abf599 s3: add GLACIER storage class
Fixes #923
2018-12-06 21:53:05 +00:00
Nick Craig-Wood
feea0532cd azureblob: ignore directory markers - fixes #2806
This ignores 0 length blobs if
- they end with /
- they have the metadata hdi_isfolder = true
2018-12-06 21:47:03 +00:00
Nick Craig-Wood
d3e8ae1820 Add Mark Otway to contributors 2018-12-06 15:13:03 +00:00
Nick Craig-Wood
91a9a959a2 Add Mathieu Carbou to contributors 2018-12-06 15:12:58 +00:00
Mark Otway
04eae51d11 Fix install for Synology
7z check doesn't work due to misplaced comma, so installation fails on Synology.
2018-12-06 15:12:21 +00:00
Mathieu Carbou
8fb707e16d Fixes #1788: Retry-After support for Dropbox backend 2018-12-05 22:03:30 +00:00
Mathieu Carbou
4138d5aa75 Issue #1788: Pointing to Dropbox's v5.0.0 tag 2018-12-05 22:03:30 +00:00
Nick Craig-Wood
fc654a4cec http: fix backend with --files-from and non-existent files
Before this fix the http backend was returning the wrong error code
when files were not found.  This was causing --files-from to error on
missing files instead of skipping them like it should.
2018-12-04 17:40:44 +00:00
Nick Craig-Wood
26b5f55cba Update after goimports change 2018-12-04 10:11:57 +00:00
Nick Craig-Wood
3f572e6bf2 webdav: fix infinite loop on failed directory creation - fixes #2714 2018-12-02 21:03:12 +00:00
Nick Craig-Wood
941ad6bc62 azureblob: use the s3 pacer for 0 delay - fixes #2799 2018-12-02 20:55:16 +00:00
Nick Craig-Wood
5d1d93e163 azureblob: use the rclone HTTP client - fixes #2654
This enables --dump headers and --timeout to work properly.
2018-12-02 20:55:16 +00:00
Nick Craig-Wood
35fba5bfdd Add Garry McNulty to contributors 2018-12-02 20:52:04 +00:00
Garry McNulty
887834da91 b2: cleanup unfinished large files
The `cleanup` command will delete unfinished large file uploads that
were started more than a day ago (to avoid deleting uploads that are
potentially still in progress).

Fixes #2617
2018-12-02 20:51:13 +00:00
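For example, the command described above run against a bucket; the bucket name is hypothetical:

    rclone cleanup b2:my-bucket   # removes unfinished large file uploads started more than a day ago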
Nick Craig-Wood
107293c80e copy,move: restore --no-traverse flag
The --no-traverse flag was not implemented when the new sync routines
(using the march package) were implemented.

This re-implements --no-traverse in march by trying to find a match
for each object with NewObject rather than from a directory listing.
2018-12-02 20:28:13 +00:00
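For example, copying a handful of files into a very large destination without listing it (paths hypothetical):

    rclone copy /local/new-files remote:huge-dir --no-traverse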
Nick Craig-Wood
e3c4ebd59a march: factor calling parameters into a structure 2018-12-02 18:07:26 +00:00
Nick Craig-Wood
d99ffde7c0 s3: change --s3-upload-concurrency default to 4 to increase performance #2772
Increasing the --s3-upload-concurrency to 4 (from 2) gives an
additional 45% throughput at the cost of 10MB extra memory per transfer.

After testing the upload performance, 4 was chosen as the new default.
2018-12-02 17:58:34 +00:00
Nick Craig-Wood
198c34ce21 s3: implement --s3-upload-cutoff for single part uploads below this - fixes #2772
Before this change rclone would use multipart uploads for any size of
file.  However multipart uploads are less efficient for smaller files
and don't have MD5 checksums so it is advantageous to use single part
uploads if possible.

This implements single part uploads for all files smaller than the
upload_cutoff size.  Streamed files must be uploaded as multipart
files though.
2018-12-02 17:58:34 +00:00
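A hedged example: files below the cutoff are uploaded in a single part (and keep a valid MD5), larger ones still use multipart; the value and paths are illustrative:

    rclone copy /data s3:bucket --s3-upload-cutoff 200M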
Nick Craig-Wood
0eba88bbfe sftp: check directory is empty before issuing rmdir
Some SFTP servers allow rmdir on full directories which is allowed
according to the RFC so make sure we don't accidentally delete data
here.

See: https://forum.rclone.org/t/rmdir-and-delete-empty-src-dirs-file-does-not-exist/7737
2018-12-02 11:16:30 +00:00
Nick Craig-Wood
aeea4430d5 swift: efficiency: slim Object and reduce requests on upload
- Slim down Object to only include necessary data
- Don't HEAD an object after PUT - read the hash from the response
2018-12-02 10:23:55 +00:00
Nick Craig-Wood
4b15c4215c sftp: fix rmdir on Windows based servers (eg CrushFTP)
Before this change we used Remove to remove directories.  This works
fine on Unix based systems but not so well on Windows based ones.
Swap to using RemoveDirectory instead.
2018-11-29 21:34:37 +00:00
Nick Craig-Wood
50452207d9 swift: add --swift-no-chunk to disable segmented uploads in rcat/mount
Fixes #2776
2018-11-29 11:11:30 +00:00
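For example, streaming to swift without creating segments; container and object names are hypothetical:

    cat backup.tar | rclone rcat swift:container/backup.tar --swift-no-chunk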
Nick Craig-Wood
01fcad9b9c rc: fix docs for sync/{sync,copy,move} and operations/{copy,move}file 2018-11-29 11:11:30 +00:00
themylogin
eb41253764 azureblob: allow building azureblob backend on *BSD
FreeBSD support was added in Azure/azure-storage-blob-go@0562badec5
OpenBSD and NetBSD support was added in Azure/azure-storage-blob-go@1d6dd77d74
2018-11-27 12:20:48 +00:00
Nick Craig-Wood
89625e54cf vendor: update dependencies to latest 2018-11-26 14:10:33 +00:00
Nick Craig-Wood
58f7141c96 drive, googlecloudstorage: disallow on go1.8 due to dependent library changes
golang.org/x/oauth2/google no longer builds on go1.8
2018-11-26 14:10:33 +00:00
Nick Craig-Wood
e56c6402a7 serve restic: disallow on go1.8 because of dependent library changes
golang.org/x/net/http2 no longer builds on go1.8
2018-11-26 14:10:33 +00:00
Nick Craig-Wood
d0eb8ddc30 serve webdav: disallow on go1.8 due to dependent library changes
golang.org/x/net/webdav no longer builds with go1.8
2018-11-26 14:10:33 +00:00
Nick Craig-Wood
a6c28a5faa Start v1.45-DEV development 2018-11-24 15:20:24 +00:00
Nick Craig-Wood
d35bd15762 Version v1.45 2018-11-24 13:44:25 +00:00
Nick Craig-Wood
8b8220c4f7 azureblob: wait for up to 60s to create a just deleted container
When a container is deleted, a container with the same name cannot be
created for at least 30 seconds; the container may not be available
for more than 30 seconds if the service is still processing the
request.

We sleep so that we wait at most 60 seconds.  This is mostly useful in
the integration tests where containers get deleted and remade
immediately.
2018-11-24 10:57:37 +00:00
Nick Craig-Wood
5fe3b0ad71 Add Stephen Harris to contributors 2018-11-24 10:57:37 +00:00
Stephen Harris
4c8c87a935 Update PROXY section of the FAQ 2018-11-23 20:14:36 +00:00
Nick Craig-Wood
bb10a51b39 test_all: limit to go1.11 so the template used is supported 2018-11-23 17:17:19 +00:00
Nick Craig-Wood
df01f7a4eb test_all: fix regexp for retrying nested tests 2018-11-23 17:17:19 +00:00
Nick Craig-Wood
e84790ef79 swift: add pacer for retries to make swift more reliable #2740 2018-11-22 22:15:52 +00:00
Nick Craig-Wood
369a8ee17b ncdu: fix deleting files 2018-11-22 21:41:17 +00:00
Nick Craig-Wood
84e21ade6b cmount: fix on Linux - only apply volname for Windows and macOS 2018-11-22 20:41:05 +00:00
Sebastian Bünger
703b0535a4 yandex: update docs 2018-11-22 20:14:50 +00:00
Sebastian Bünger
155264ae12 yandex: complete rewrite
Get rid of the api client and use rest/pacer for all API calls
Add Copy, Move, DirMove, PublicLink, About optional interfaces
Improve general error handling
Remove ListR for now due to inconsistent behaviour
fixes #2586, progress on #2740 and #2178
2018-11-22 20:14:50 +00:00
Nick Craig-Wood
31e2ce03c3 fstests: re-arrange backend integration tests so they can be retried
Before this change backend integration tests depended on each other,
so tests could not be retried.

After this change we nest tests to ensure that tests are provided with
the starting state they expect.

Tell the integration test runner that it can retry backend tests also.

This also includes bin/test_independence.go which runs each test
individually for a backend to prove that they are independent.
2018-11-22 20:12:12 +00:00
Nick Craig-Wood
e969505ae4 info: fix control character map output 2018-11-20 14:04:27 +00:00
Nick Craig-Wood
26e2f1a998 Add Alexander to contributors 2018-11-20 10:22:11 +00:00
Alexander
2682d5a9cf install with busybox if available 2018-11-20 10:22:00 +00:00
Nick Craig-Wood
2191592e80 Add Henry Ptasinski to contributors 2018-11-19 13:33:59 +00:00
Nick Craig-Wood
18f758294e Add Peter Kaminski to contributors 2018-11-19 13:33:59 +00:00
Henry Ptasinski
f95c1c61dd s3: add config info for Wasabi's US-West endpoint
Wasabi has two locations, US East and US West, with different endpoint URLs.
When configuring S3 to use Wasabi, provide the endpoint information for both
locations.
2018-11-19 13:33:42 +00:00
Nick Craig-Wood
8c8dcdd521 webdav: fix config parsing so --webdav-user and --webdav-pass flags work 2018-11-17 13:14:54 +00:00
Nick Craig-Wood
141c133818 fstest: Wait for longer if necessary in TestFsChangeNotify 2018-11-16 07:45:24 +00:00
Nick Craig-Wood
0f03e55cd1 fstests: ignore main directory creation in TestFsChangeNotify 2018-11-15 18:39:28 +00:00
Nick Craig-Wood
9e6ba92a11 fstests: attempt to fix TestFsChangeNotify flakiness
This now uses testPut to upload the test files which will retry on
errors properly.
2018-11-15 18:39:28 +00:00
Nick Craig-Wood
762561f88e fstest: factor out retry logic from put code and make testPut return the object too 2018-11-15 18:39:28 +00:00
Nick Craig-Wood
084fe38922 fstests: fixes the integration test errors running crypt over swift.
Skip tests involving errors creating or removing dirs on non root
bucket based fs
2018-11-15 18:39:28 +00:00
Peter Kaminski
63a2a935fc fix typos in original files, per #2727 review request 2018-11-14 22:48:58 +00:00
Peter Kaminski
64fce8438b docs: Fix a couple of minor typos in rclone_mount.md
* "transferring" instead of "transfering"
* "connection" instead of "connnection"
* "mount" instead of "mount mount"
2018-11-14 22:48:58 +00:00
Nick Craig-Wood
f92beb4e14 fstest: Fix TestPurge causing errors with subsequent tests on azure
Before this change TestPurge would remove a container and subsequent
tests would fail because the container was still being deleted so
couldn't be created.

This was fixed by introducing an fstest.NewRunIndividual() test runner
for TestPurge which causes the test to be run on a new container.
2018-11-14 17:14:02 +00:00
Nick Craig-Wood
f7ce2e8d95 azureblob: fix erroneous Rmdir error "directory not empty"
Before this change Rmdir would check the root rather than the
directory specified for being empty and return "directory not empty"
when it shouldn't have done.
2018-11-14 17:13:39 +00:00
Nick Craig-Wood
3975d82b3b Add brused27 to contributors 2018-11-13 17:00:26 +00:00
brused27
d87aa33ec5 azureblob: Avoid context deadline exceeded error by setting a large TryTimeout value - Fixes #2647 2018-11-13 16:59:53 +00:00
Anagh Kumar Baranwal
1b78f4d1ea Changed the docs scripts to use $HOME & $USER instead of specific values
Signed-off-by: Anagh Kumar Baranwal <anaghk.dos@gmail.com>
2018-11-13 11:00:34 +00:00
Nick Craig-Wood
b3704597f3 cmount: make --volname work for Windows - fixes #2679 2018-11-12 16:32:02 +00:00
Nick Craig-Wood
16f797a7d7 filter: add --ignore-case flag - fixes #502
The --ignore-case flag causes the filtering of file names to be case
insensitive.  The flag name comes from GNU tar.
2018-11-12 14:29:37 +00:00
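For example, matching .jpg and .JPG alike (pattern and paths illustrative):

    rclone copy remote:photos /local/photos --include "*.jpg" --ignore-case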
Nick Craig-Wood
ee700ec01a lib/readers: add mutex to RepeatableReader - fixes #2572 2018-11-12 12:02:05 +00:00
Nick Craig-Wood
9b3c951ab7 Add Jake Coggiano to contributors 2018-11-12 11:34:28 +00:00
Jake Coggiano
22d17e79e3 dropbox: add dropbox impersonate support - fixes #2577 2018-11-12 11:33:39 +00:00
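A hedged example, assuming the option is exposed as a --dropbox-impersonate flag; the account shown is hypothetical:

    rclone ls dropbox: --dropbox-impersonate user@example.com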
Jake Coggiano
6d3088a00b vendor: add github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team/ 2018-11-12 11:33:39 +00:00
Nick Craig-Wood
84202c7471 onedrive: note 50,000 files is limit for one directory #2707 2018-11-11 15:22:19 +00:00
Nick Craig-Wood
96a05516f9 acd,box,onedrive,pcloud: remove log.Fatal from NewFs
And replace with error returns.
2018-11-11 11:00:14 +00:00
Nick Craig-Wood
4f6a942595 cmd: Make --progress update the stats right at the end
Before this when rclone exited the stats would just show the last
printed version, rather than the actual final state.
2018-11-11 09:57:37 +00:00
Nick Craig-Wood
c4b0a37b21 rc: improve docs on debugging 2018-11-10 10:18:13 +00:00
Nick Craig-Wood
9322f4baef Add Erik Swanson to contributors 2018-11-08 12:58:41 +00:00
Erik Swanson
fa0a1e7261 s3: fix role_arn, credential_source, ...
When the env_auth option is enabled, the AWS SDK's session constructor
now loads configuration from ~/.aws/config and environment variables,
and credentials per the selected (or default) AWS_PROFILE's settings.

This is accomplished by **NOT** including any Credential provider in the
aws.Config passed to the session constructor: If the Config.Credentials
is non-nil, that will always be used and the user's configuration re
role_arn, credential_source, source_profile, etc... from the shared
config will be completely ignored.

(The conditional creation and configuration of the stscreds Credential
provider is complicated enough that it is not worth re-creating that
logic.)
2018-11-08 12:58:23 +00:00
Nick Craig-Wood
4ad08794c9 fserrors: add "server closed idle connection" to retriable errors
This seems to be related to this go issue: https://github.com/golang/go/issues/19943

See: https://forum.rclone.org/t/copy-from-dropbox-to-google-drive-yields-failed-to-copy-failed-to-open-source-object-server-closed-idle-connection-error/7460
2018-11-08 11:12:25 +00:00
Nick Craig-Wood
c0f600764b Add Scott Edlund to contributors 2018-11-07 14:27:06 +00:00
Scott Edlund
f139e07380 enable softfloat on MIPS arch
MIPS does not have a floating point unit.  Enable softfloat to build binaries that run on devices that do not have MIPS_FPU enabled in their kernel.
2018-11-07 14:26:48 +00:00
Nick Craig-Wood
c6786eeb2d move: don't create directories with --dry-run - fixes #2676 2018-11-06 13:34:15 +00:00
Nick Craig-Wood
57b85b8155 rc: fix job tests on Windows 2018-11-06 13:03:48 +00:00
Nick Craig-Wood
2b1194c57e rc: update docs with new methods 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
e6dd121f52 config: add rc operations for config 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
e600217666 config: create config directory on save if it is missing 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
bc17ca7ed9 rc: implement core/obscure 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
1916410316 rc: add core/version and put definitions next to implementations 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
dddfbec92a cmd/version: factor version number parsing routines into fs/version 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
75a88de55c rc/rcserver: with --rc-files if auth set, pass on to URL opened
If `--rc-user` or `--rc-pass` is set then the URL that is opened with
`--rc-files` will have the authorization in the URL in the
`http://user:pass@localhost/` style.
2018-11-05 15:44:40 +00:00
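For example, serving a web UI over the rc with authentication as described above; the credentials and path are illustrative:

    rclone rcd --rc-user me --rc-pass secret --rc-files /path/to/webui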
Nick Craig-Wood
2466f4d152 sync: add rc commands for sync/copy/move 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
39283c8a35 operations: implement operations remote control commands 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
46c2f55545 copyurl: factor code into operations and write test 2018-11-04 20:42:57 +00:00
Nick Craig-Wood
fc2afcbcbd lsjson: factor internals of lsjson command into operations 2018-11-04 20:42:57 +00:00
Nick Craig-Wood
fa0a9653d2 rc: methods marked as AuthRequired need auth unless --rc-no-auth
Methods which can read or mutate external storage will require
authorisation - enforce this.  This can be overridden by `--rc-no-auth`.
2018-11-04 20:42:57 +00:00
Nick Craig-Wood
181267e20e cmd/rc: add --user and --pass flags and interpret --rc-user, --rc-pass, --rc-addr 2018-11-04 20:42:57 +00:00
Nick Craig-Wood
75e8ea383c rc: implement rc.PutCachedFs for prefilling the remote cache 2018-11-04 20:42:57 +00:00
Nick Craig-Wood
8c8b58a7de rc: expire remote cache and fix tests under race detector 2018-11-04 20:42:57 +00:00
Nick Craig-Wood
b961e07c57 rc: ensure rclone fails to start up if the --rc port is in use already 2018-11-04 15:11:51 +00:00
Nick Craig-Wood
0b80d1481a cache: make tests not start an rc but use the internal framework 2018-11-04 15:11:51 +00:00
Nick Craig-Wood
89550e7121 rcserver: serve directories as well as files 2018-11-04 15:11:51 +00:00
Nick Craig-Wood
370c218c63 cmd/http: factor directory serving routines into httplib/serve and write tests 2018-11-04 12:46:44 +00:00
Nick Craig-Wood
b972dcb0ae rc: implement options/blocks,get,set and register options 2018-11-03 11:32:00 +00:00
Nick Craig-Wood
0bfa9811f7 rc: factor server code into rcserver and implement serving objects
If a GET or HEAD request is received with a URL parameter of fs then
it will be served from that remote.
2018-11-03 11:32:00 +00:00
Nick Craig-Wood
aa9b2c31f4 serve/restic: factor object serving into cmd/httplib/serve 2018-11-03 11:32:00 +00:00
Nick Craig-Wood
cff75db6a4 rcd: implement new command just to serve the remote control API 2018-11-03 11:32:00 +00:00
Nick Craig-Wood
75252e4a89 rc: add --rc-files flag to serve files on the rc http server
This enables building a browser based UI for rclone
2018-11-03 11:32:00 +00:00
Nick Craig-Wood
2089405e1b fs/rc: add more infrastructure to help writing rc functions
- Fs cache for rc commands
- Helper functions for parsing the input
- Reshape command for manipulating JSON blobs
- Background Job starting, control, query and expiry
2018-11-02 17:32:20 +00:00
Nick Craig-Wood
a379eec9d9 fstest/mockfs: create mock fs.Fs for testing 2018-11-02 17:32:20 +00:00
Nick Craig-Wood
45d5339fcb cmd/rc: add --json flag for structured JSON input 2018-11-02 17:32:20 +00:00
Nick Craig-Wood
bb5637d46a serve http, webdav, restic: ensure rclone exits if the port is in use 2018-11-02 17:32:20 +00:00
Nick Craig-Wood
1f05d5bf4a delete: clarify that it only deletes files not directories 2018-11-02 17:07:45 +00:00
HerrH
ff87da9c3b Added some more links for easier finding
Expanded the Installation & Docs section with links to the website and added a link to the full list of storage providers and features.
2018-11-02 16:56:20 +00:00
ssaqua
3d81b75f44 dedupe: check for existing filename before renaming a dupe file 2018-11-02 16:51:52 +00:00
Nick Craig-Wood
baba6d67e6 s3: set ACL for server side copies to that provided by the user - fixes #2691
Before this change the ACL for objects which were server side copied
was left at the default "private" settings. S3 doesn't copy the ACL
from the source when you copy an object, you have to set it afresh
which is what this does.
2018-11-02 16:22:31 +00:00
Nick Craig-Wood
04c0564fe2 Add Ralf Hemberger to contributors 2018-11-02 09:53:23 +00:00
Ralf Hemberger
91cfdb81f5 change spaces to tab 2018-11-02 09:50:34 +00:00
Ralf Hemberger
deae7bf33c WebDav - Add RFC3339 date format - fixes #2712 2018-11-02 09:50:34 +00:00
Henning Surmeier
04a0da1f92 ncdu: add remove option ('d' key)
delete files by pressing 'd' in the ncdu listing

GUI Improvements:
Boxes now have a border around them
Boxes can ask questions and allow the selection of options. The
selected option will be given to the UI.boxMenuHandler function.

Fixes #2571
2018-10-28 20:44:03 +00:00
Henning Surmeier
9486df0226 ncdu/scan: remove option for memory representation
Remove files/directories from the in memory structs of the cloud
directory. Size and Count will be recalculated and populated upwards
to the parent directories.
2018-10-28 20:44:03 +00:00
Nick Craig-Wood
948a5d25c2 operations: Fix Purge and Rmdirs when dir is not empty
Before this change, Purge on the fallback path would try to delete
directories starting from the root rather than the dir passed in.
Rmdirs would also attempt to delete the root.
2018-10-27 11:51:17 +01:00
Nick Craig-Wood
f7c31cd210 Add Florian Gamboeck to contributors 2018-10-27 00:28:11 +01:00
Florian Gamboeck
696e7b2833 backend/cache: Print correct info about Cache Writes 2018-10-27 00:27:47 +01:00
Anagh Kumar Baranwal
e76cf1217f Added docs to check for key generation on Mega
Signed-off-by: Anagh Kumar Baranwal <anaghk.dos@gmail.com>
2018-10-25 22:49:21 +01:00
Nick Craig-Wood
543e37f662 Require go1.8 for compilation 2018-10-25 17:06:33 +01:00
Nick Craig-Wood
c514cb752d vendor: update to latest versions of everything 2018-10-25 17:06:33 +01:00
Nick Craig-Wood
c0ca93ae6f opendrive: fix retries of upload chunks - fixes #2646
Before this change, upload chunks were being emptied on retry.  This
change introduces a RepeatableReader to fix the problem.
2018-10-25 11:50:38 +01:00
Nick Craig-Wood
38a89d49ae fstest/test_all: tidy HTML report
- link test number to online copy
- style links
- attempt to make a nicer colour scheme
2018-10-25 11:33:17 +01:00
Anagh Kumar Baranwal
6531126eb2 Fixes the rc docs creation
Signed-off-by: Anagh Kumar Baranwal <anaghk.dos@gmail.com>
2018-10-25 11:29:59 +01:00
Nick Craig-Wood
25d0e59ef8 fstest/test_all: make sure Version is correct in build 2018-10-25 08:36:09 +01:00
Nick Craig-Wood
b0db08fd2b fstest/test_all: constrain to go1.10 and above 2018-10-24 21:33:42 +01:00
Nick Craig-Wood
07addf74fd fstest/test_all: upload a copy of the report to "current" 2018-10-24 12:21:07 +01:00
Nick Craig-Wood
52c7c738ca fstest/test_all: limit concurrency and run tests in random order 2018-10-24 10:46:58 +01:00
Nick Craig-Wood
5c32b32011 fstest/test_all: fix directories that tests are run in
- Don't build a binary for backend tests
- Run tests in their relevant directories
2018-10-23 17:31:11 +01:00
Nick Craig-Wood
fe61cff079 crypt: ensure integration tests run correctly when -remote is set 2018-10-23 17:12:38 +01:00
Nick Craig-Wood
fbab1e55bb fstest/test_all: adapt to nested test definitions 2018-10-23 16:56:35 +01:00
Nick Craig-Wood
1bfd07567e fstest/test_all: add oneonly flag to only run one test per backend if required 2018-10-23 14:07:48 +01:00
Nick Craig-Wood
f97c4c8d9d fstest/test_all: rework integration tests to improve output
- Make integration tests use a config file
- Output individual logs for each test
- Make HTML report and open browser
- Optionally email and upload results
2018-10-23 14:07:48 +01:00
Anagh Kumar Baranwal
a3c55462a8 Set python version explicitly to 2 to avoid issues on systems where
the default python version is `3`

Signed-off-by: Anagh Kumar Baranwal <anaghk.dos@gmail.com>
2018-10-23 12:14:52 +01:00
Anagh Kumar Baranwal
bbb9a504a8 Added docs to use the -P/--progress flag for real time statistics
Signed-off-by: Anagh Kumar Baranwal <anaghk.dos@gmail.com>
2018-10-23 12:14:52 +01:00
Jon Fautley
dedc7d885c sftp: Ensure file hash checking is really disabled 2018-10-23 12:03:50 +01:00
Nick Craig-Wood
c5ac96e9e7 Make --files-from only read the objects specified and don't scan directories
Before this change using --files-from would scan all the directories
that the files could possibly be in, causing rclone to do more work
than was necessary.

After this change, rclone constructs an in memory tree using the
--fast-list mechanism but from all of the files in the --files-from
list and without scanning any directories.

Any objects that are not found in the --files-from list are ignored
silently.

This mechanism is used for sync/copy/move (march) and all of the
listing commands ls/lsf/md5sum/etc (walk).
2018-10-20 18:13:31 +01:00
Nick Craig-Wood
9959c5f17f webdav: add Content-Type to PUT requests - fixes #2664 2018-10-18 13:18:24 +01:00
Nick Craig-Wood
e8d0a363fc opendrive: fix transfer of files with + and & in - fixes #2657 2018-10-17 14:22:04 +01:00
albertony
935b7c1c0f jottacloud: fix bug in --fast-list handing of empty folders - fixes #2650 2018-10-17 13:58:36 +01:00
Fabian Möller
15ce0ae57c fstests: fix maximum tested size in TestFsPutChunked
Before this it was possible that maxChunkSize was incorrectly set to 200.
2018-10-16 11:50:47 +02:00
Nick Craig-Wood
67703a73de Start v1.44-DEV development 2018-10-15 12:33:27 +01:00
Nick Craig-Wood
f96ce5674b Version v1.44 2018-10-15 11:03:08 +01:00
Nick Craig-Wood
7f0b204292 azureblob: work around SDK bug which causes errors for chunk-sized files (again)
Until https://github.com/Azure/azure-storage-blob-go/pull/75 is merged
the SDK can't upload a single blob of exactly the chunk size, so
upload files of this size with a multipart upload as a work around.

The previous fix for this 6a773289e7 turned out to cause problems
uploading files with maximum chunk size so needed to be redone.

Fixes #2653
2018-10-15 09:05:34 +01:00
Nick Craig-Wood
83b1ae4833 Add buergi to contributors 2018-10-14 17:20:34 +01:00
buergi
753cc63d96 webdav: add workaround for missing mtime - fixes #2420 2018-10-14 17:19:23 +01:00
Nick Craig-Wood
5dac8e055f union: fix --backup-dir on union backend
Before this fix --backup-dir would fail.

This is fixed by wrapping objects returned so that they belong to the
union Fs rather than the underlying Fs.
2018-10-14 15:19:02 +01:00
Nick Craig-Wood
c3a8eb1c10 fstests: make findObject() sleep a bit longer to fix b2 largePut tests 2018-10-14 14:45:23 +01:00
Nick Craig-Wood
0f2a5403db acd,box,onedrive,opendrive,pcloud: fix Features() retaining the original receiver
Before this change the Features() method would return a different Fs
to that the Features() method was called on if the remote was
instantiated on a file.

The practical effect of this is that optional features, eg `rclone
about` wouldn't work properly when called on a file, and likely this
has been causing low level problems for users of these backends for
ages.

Ideally there would be a test for this, but it turns out that this is
really hard, so instead of that all the backends have been converted
to not copy the Fs and a big warning comment inserted for future
readers.

Fixes #2182
2018-10-14 14:41:26 +01:00
Nick Craig-Wood
dcce84714e onedrive: fix link command for non root 2018-10-14 14:17:53 +01:00
Nick Craig-Wood
eb8130f48a fstests: update TestPublicLink comment to show how to run solo 2018-10-14 14:17:05 +01:00
Nick Craig-Wood
aa58f66806 build: add longer timeout to integration tests 2018-10-14 14:16:33 +01:00
Nick Craig-Wood
a3dc591b8e Add teresy to contributors 2018-10-14 00:19:49 +01:00
teresy
5ee1bd7ba4 Remove redundant nil checks 2018-10-14 00:19:35 +01:00
Nick Craig-Wood
dbedf33b9f s3: fix v2 signer on files with spaces - fixes #2438
Before this fix the v2 signer was failing for files with spaces in.
2018-10-14 00:10:29 +01:00
Nick Craig-Wood
0f02c9540c s3: make --s3-v2-auth flag
This is an alternative to setting the region to "other-v2-signature"
which is inconvenient for multi-region providers.
2018-10-14 00:10:29 +01:00
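For example, forcing v2 signatures for a provider that needs them without touching the region (remote name hypothetical):

    rclone lsd oldprovider: --s3-v2-auth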
Nick Craig-Wood
06922674c8 drive, s3: review hidden config items 2018-10-13 23:30:13 +01:00
Nick Craig-Wood
8ad7da066c drive: when listing team drives, continue on failure
This means that if the team drive listing returns a 500 error (which
seems reasonably common) rclone will continue to the point where it
asks for the team drive ID.

https://forum.rclone.org/t/many-team-drives-causes-rclone-to-fail/7159
2018-10-13 23:30:13 +01:00
Nick Craig-Wood
e1503add41 azureblob, b2, drive: implement set upload cutoff for chunked upload tests 2018-10-13 22:49:12 +01:00
Nick Craig-Wood
6fea75afde fstests: fix upload offsets not being set and redownload test files
In chunked upload tests:

- Add ability to set upload offset
- Read back the uploaded file to check it is OK
2018-10-13 22:49:12 +01:00
Nick Craig-Wood
6a773289e7 azureblob: work around SDK bug which causes errors for chunk-sized files
See https://github.com/Azure/azure-storage-blob-go/pull/75 for details
2018-10-13 22:49:12 +01:00
Nick Craig-Wood
ade252f13b build: fixup code formatting after goimports change 2018-10-13 22:47:12 +01:00
Nick Craig-Wood
bb2e361004 jottacloud: Fix socket leak on Object.Remove - fixes #2637 2018-10-13 22:47:12 +01:00
Nick Craig-Wood
b24facb73d rest: Fix documentation so it is clearer when resp.Body is closed 2018-10-13 22:47:12 +01:00
Nick Craig-Wood
014d58a757 Add David Haguenauer to contributors 2018-10-13 12:56:21 +01:00
David Haguenauer
1d16e16b30 docs: replace "Github" with "GitHub"
This is the way GitHub refers to itself.
2018-10-13 12:55:45 +01:00
Nick Craig-Wood
249a523dd3 build: fix golint install with new path 2018-10-12 11:35:35 +01:00
Nick Craig-Wood
8d72ef8d1e cmd: Don't print non-ASCII characters with --progress on windows - fixes #2501
This bug causes lots of strange behaviour with non-ASCII characters and --progress

https://github.com/Azure/go-ansiterm/issues/26
2018-10-11 21:25:04 +01:00
Nick Craig-Wood
bc8f0208aa rest: Remove auth headers on HTTP redirect
Before this change the rest package would forward all the headers on
an HTTP redirect, including the Authorization: header.  This caused
problems when forwarded to a signed S3 URL ("Only one auth mechanism
allowed") as well as being a potential security risk.

After this change we use the go1.8+ mechanism for doing this instead of
using our own, which correctly removes the Authorization: header when
redirecting to a different host.

This hasn't fixed the behaviour for rclone compiled with go1.7.

Fixes #2635
2018-10-11 21:20:33 +01:00
Nick Craig-Wood
ee25b6106a Add jackyzy823 to contributors 2018-10-11 14:50:33 +01:00
Nick Craig-Wood
5c1b135304 Add dcpu to contributors 2018-10-11 14:50:33 +01:00
HerrH
2f2029fed5 Improved and updated the readme
Updated providers list, added links to docs, improved readability
2018-10-11 14:50:21 +01:00
Fabian Möller
57273d364b fstests: add TestFsPutChunked 2018-10-11 14:47:58 +01:00
Fabian Möller
84289d1d69 readers: add NewPatternReader 2018-10-11 14:47:58 +01:00
Fabian Möller
98e2746e31 backend: add fstests.ChunkedUploadConfig
- azureblob
- b2
- drive
- dropbox
- onedrive
- s3
- swift
2018-10-11 14:47:58 +01:00
Fabian Möller
c00ec0cbe4 fstests: add ChunkedUploadConfig 2018-10-11 14:47:58 +01:00
Fabian Möller
1a40bceb1d backend: unify NewFs path handling for wrapping remotes
Use the same function to join the root paths for the wrapping remotes
alias, cache and crypt.
The new function fspath.JoinRootPath is equivalent to path.Join, but if
the first non-empty element starts with "//", this is preserved to allow
Windows network paths to be used in these remotes.
2018-10-10 17:50:27 +01:00
jackyzy823
411a6cc472 onedrive: add link sharing support #2178 2018-10-09 20:11:48 +08:00
Fabian Möller
1e2676df26 union: fix ChangeNotify to support multiple remotes
To correctly support multiple remotes, each remote has to receive a
value on the input channel.
2018-10-07 11:13:37 +02:00
Nick Craig-Wood
364fca5cea union: implement optional interfaces (Move, DirMove, Copy etc) - fixes #2619
Implement optional interfaces
- Purge
- PutStream
- Copy
- Move
- DirMove
- DirCacheFlush
- ChangeNotify
- About

Make Hashes() return the intersection of all the hashes supported by the remotes
2018-10-07 00:06:29 +01:00
Nick Craig-Wood
87e1efa997 mount, vfs: Remove EXPERIMENTAL tags
rclone mount and the --vfs-cache-mode has been tested extensively by
users now so removing the EXPERIMENTAL tag is appropriate.
2018-10-06 11:47:46 +01:00
Nick Craig-Wood
6709084e2f config: Show URL of backend help page when starting config 2018-10-06 11:47:46 +01:00
Nick Craig-Wood
6b1f915ebc fs: Implement RegInfo.FileName to return the on disk filename for a backend
Use it in make_backend_docs.py
2018-10-06 11:47:46 +01:00
Nick Craig-Wood
78b9bd77f5 docs: auto generate backend options documentation
This inserts the output of "rclone help backend xxx" into the help
pages for each backend.
2018-10-06 11:47:46 +01:00
Nick Craig-Wood
a9273c5da5 docs: move documentation for options from docs/content into backends
In the following commit, the documentation will be autogenerated.
2018-10-06 11:47:46 +01:00
Nick Craig-Wood
14128656db cmd: Implement specialised help for flags and backends - fixes #2541
Instead of showing all flags/backends all the time, you can type

    rclone help flags
    rclone help flags <regexp>
    rclone help backends
    rclone help backend <name>
2018-10-06 11:47:45 +01:00
Nick Craig-Wood
1557287c64 fs: Make Option.GetValue() public #2541 2018-10-06 11:47:45 +01:00
Nick Craig-Wood
e7e467fb3a cmd: factor FlagName into fs.Option #2541 2018-10-06 11:47:45 +01:00
Nick Craig-Wood
5fde7d8b12 cmd: split flags up into global and backend flags #2541 2018-10-06 11:47:45 +01:00
Nick Craig-Wood
3c086f5f7f cmd: Make default help less verbose #2541
This stops the default help showing all the flags, backends, commands
2018-10-06 11:47:45 +01:00
dcpu
c0084f43dd cache: Remove entries that no longer exist in the source
Listing a directory with 25k files:

before (1.43.1): 5m24s
after: 3m21s
2018-10-06 11:23:33 +01:00
Nick Craig-Wood
ddbd4fd881 Add Paul Kohout to contributors 2018-10-04 08:25:39 +01:00
Paul Kohout
7826e39fcf s3: use configured server-side-encryption and storage class options when calling CopyObject() - fixes #2610 2018-10-04 08:25:20 +01:00
Nick Craig-Wood
06ae4258be cmd: Fix -P not ending with a new line
Before this fix rclone didn't wait for the stats to be finished before
exiting, so the final new line was never printed.

After this change rclone will wait for the stats routine to cease
before exiting.
2018-10-03 21:46:18 +01:00
Alex Chen
d9037fe2be onedrive: ignore OneNote files by default - fixes #211 2018-10-03 12:46:25 +08:00
Fabian Möller
1d14972e41 vfs: reduce directory cache cleared by poll-interval
Reduce the number of nodes purged from the dir-cache when ForgetPath is
called. This is done by only forgetting the cache of the received path
and invalidating the parent folder cache by resetting *Dir.read.

The parent will read the listing on the next access and reuse the
dir-cache of entries in *Dir.items.
2018-10-02 10:21:14 +01:00
Fabian Möller
05fa9cb379 drive: improve directory notifications in ChangeNotify
When moving a directory in drive, most of the time only a notification
for the directory itself is created, not the old or new parents.

This tries to find the old path in the dirCache and the new path with
the dirCache of the new parent, which can result in two notifications
for a moved directory.
2018-10-02 10:14:14 +01:00
Nick Craig-Wood
59e14c25df vfs: enable rename for nearly all remotes using server side Move or Copy
Before this change remotes without server side Move (eg swift, s3,
gcs) would not be able to rename files.

After it means nearly all remotes will be able to rename files on
rclone mount with the notable exceptions of b2 and yandex.

This changes checks to see if the remote can do Move or Copy then
calls `operations.Move` to do the actual move.  This will do a server
side Move or Copy but won't download and re-upload the file.

It also checks to see if the destination exists first which avoids
conflicts or duplicates.

Fixes #1965
Fixes #2569
2018-09-29 14:56:20 +01:00
Nick Craig-Wood
fc640d3a09 Add Frantisek Fuka to contributors 2018-09-29 14:55:11 +01:00
Frantisek Fuka
e1f67295b4 b2: add note about cleanup in docs
Added: "Note that `cleanup` does not remove partially uploaded filesfrom the bucket."
2018-09-29 14:47:31 +01:00
Henning Surmeier
22ac80e83a webdav/sharepoint: renew cookies after 12hrs 2018-09-26 13:04:41 +01:00
Nick Craig-Wood
c7aa6b587b Add xnaas to contributors 2018-09-26 10:07:13 +01:00
xnaas
8d1848bebe docs: note --track-renames doesn't work with crypt 2018-09-26 10:06:19 +01:00
Fabian Möller
527c0af1c3 drive: cleanup changeNotifyRunner 2018-09-25 17:54:48 +02:00
Fabian Möller
a20fae0364 drive: code cleanup 2018-09-25 15:20:23 +01:00
Fabian Möller
15b1a1f909 drive: add support for apps-script to json export 2018-09-25 15:20:23 +01:00
Fabian Möller
80b25daac7 drive: add support for multipart document extensions 2018-09-25 15:20:23 +01:00
Fabian Möller
70b30d5ca4 drive: add document links 2018-09-25 15:20:23 +01:00
Fabian Möller
0b2fc621fc drive: restructure Object type 2018-09-25 15:20:23 +01:00
Fabian Möller
171e39b230 drive: add --drive-import-formats
Add a new flag to the drive backend to allow document conversions on upload.
The existing --drive-formats flag has been renamed to --drive-export-formats.
The old flag is still working to be backward compatible.
2018-09-25 15:20:23 +01:00
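A hedged example of the renamed and new flags; file names and remote are hypothetical:

    rclone copy notes.odt gdrive:docs --drive-import-formats odt        # convert on upload
    rclone copy gdrive:docs /tmp/export --drive-export-formats docx     # old --drive-formats behaviour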
Fabian Möller
690a44e40e drive: rewrite mime type and extension handling
Make use of the mime package to find matching extensions and mime types.
For simplicity, all extensions are now prefixed with "." to match the
mime package requirements.

Parsed extensions get converted if needed.
2018-09-25 15:20:23 +01:00
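A minimal Go sketch of the standard library mime lookups this change builds on; extensions must carry the leading "." that the mime package expects (the example values are illustrative only):

    package main

    import (
        "fmt"
        "mime"
    )

    func main() {
        // Look up a mime type from an extension; the leading "." is required.
        fmt.Println(mime.TypeByExtension(".pdf")) // application/pdf
        // Look up the known extensions for a mime type, also returned with a ".".
        exts, err := mime.ExtensionsByType("text/html")
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(exts) // e.g. [.htm .html]
    }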
Fabian Möller
d9a3b26e47 vfs: add vfs/poll-interval rc command
This command can be used to query the current status of the
poll-interval option and also update the value.
2018-09-25 14:01:13 +02:00
Fabian Möller
1eec59e091 fs: update ChangeNotifier interface
This introduces a channel to the ChangeNotify function, which can be
used to update the poll-interval and cleanly exit the polling function.
2018-09-25 14:01:13 +02:00
Nick Craig-Wood
96ce49ec4e Add ssaqua to contributors 2018-09-24 17:08:47 +01:00
ssaqua
ae63e4b4f0 list: change debug logs for excluded items 2018-09-24 17:08:35 +01:00
Nick Craig-Wood
e2fb588eb9 Add frenos to contributors 2018-09-24 17:05:03 +01:00
frenos
382a6863b5 rc: add support for OPTIONS and basic CORS - #2575 2018-09-24 17:04:47 +01:00
Nick Craig-Wood
7b975bc1ff alias: Fix handling of Windows network paths
Before this fix, the alias backend would mangle Windows paths like
//server/drive as it was treating them as unix paths.

See https://forum.rclone.org/t/smb-share-alias/6857
2018-09-21 18:24:21 +01:00
Nick Craig-Wood
467fe30a5e vendor: update to latest versions of everything 2018-09-21 18:23:37 +01:00
Nick Craig-Wood
4415aa5c2e build: fix make update 2018-09-21 18:23:37 +01:00
Nick Craig-Wood
17ab38502d Revamp issue and PR templates and CONTRIBUTING guide
Thanks to @fd0 of the restic project for a very useful blog post and
something to plagiarise :-)

https://restic.net/blog/2018-09-09/GitHub-issue-templates
2018-09-21 18:17:32 +01:00
Nick Craig-Wood
9fa8c959ee local: preallocate files on linux with fallocate(2) 2018-09-19 16:04:57 +01:00
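A minimal sketch (not rclone's actual code) of preallocating a destination file with fallocate(2) via golang.org/x/sys/unix; this only applies on Linux:

    package main

    import (
        "log"
        "os"

        "golang.org/x/sys/unix"
    )

    // preallocate reserves size bytes for path so the filesystem can lay the
    // file out contiguously before the data is copied in.
    func preallocate(path string, size int64) error {
        f, err := os.Create(path)
        if err != nil {
            return err
        }
        defer f.Close()
        // mode 0 allocates the blocks and extends the file size to off+len.
        return unix.Fallocate(int(f.Fd()), 0, 0, size)
    }

    func main() {
        if err := preallocate("/tmp/out.bin", 100<<20); err != nil {
            log.Fatal(err)
        }
    }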
Nick Craig-Wood
f29c6049fc local: preallocate files on Windows to reduce fragmentation #2469
Before this change on Windows, files copied locally could become
heavily fragmented (300+ fragments for maybe 100 MB), no matter how
much contiguous free space there was (even if it's over 1TiB). This
can needlessly yet severely adversely affect performance on hard
disks.

This change uses NtSetInformationFile to pre-allocate the space to
avoid this.

It does nothing on other OSes other than Windows.
2018-09-19 16:04:57 +01:00
Nick Craig-Wood
e44fa5db8e build: update git bisect scripts 2018-09-19 16:04:57 +01:00
Fabian Möller
03ea05b860 drive: add workaround for slow downloads
Add --drive-v2-download-min-size flag to allow downloading files via the
drive v2 API. If files are greater than this flag, a download link is
generated when needed. The flag is disabled by default.
2018-09-18 15:55:50 +01:00
Fabian Möller
b8678c9d4b vendor: add google.golang.org/api/drive/v2 2018-09-18 15:55:50 +01:00
Fabian Möller
13823a7743 drive: fix escaped chars in documents during list
Fixes #2591
2018-09-18 15:53:44 +01:00
sandeepkru
b94d87ae2d azureblob and fstests - Modify integration tests to include new
optional setting to test SetTier on only a few supported tiers.

Remove unused optional interface ListTiers and backend and internal tests
2018-09-18 13:56:09 +01:00
sandeepkru
e0c5f7ff1b fs - Remove unreferenced ListTierer optional interface 2018-09-18 13:56:09 +01:00
Nick Craig-Wood
b22ecbe174 Add Joanna Marek to contributors 2018-09-18 10:27:33 +01:00
Nick Craig-Wood
c41be436c6 Add Antoine GIRARD to contributors 2018-09-18 10:27:33 +01:00
Joanna Marek
e022ffce0f accounting: change the mechanism for cutting too long names - fixes #2490 2018-09-18 10:27:23 +01:00
albertony
cfe65f1e72 jottacloud: minor update in docs 2018-09-18 10:25:30 +01:00
Sebastian Bünger
b18595ae07 jottacloud: Fix handling of reserved characters. fixes #2531 2018-09-17 12:42:35 +01:00
Nick Craig-Wood
d27630626a webdav: add a small pause after failed upload before deleting file #2517
This fixes the integration tests for `serve webdav` which uses the
webdav backend tests.
2018-09-17 08:51:50 +01:00
Nick Craig-Wood
c473c7cb53 ftp: add a small pause after failed upload before deleting file #2517
This fixes the integration tests for `serve ftp` which uses the ftp
backend tests.
2018-09-17 08:51:50 +01:00
Nick Craig-Wood
ef3526b3b8 vfs: fix race condition detected by serve ftp tests 2018-09-17 08:50:34 +01:00
Nick Craig-Wood
d4ee7277c0 serve ftp: disable on plan9 since it doesn't compile 2018-09-17 08:50:34 +01:00
Antoine GIRARD
4a3efa5d45 cmd/serve: add ftp server - implement #2151 2018-09-17 08:50:34 +01:00
Nick Craig-Wood
a14f0d46d7 vendor: add github.com/goftp/server 2018-09-17 08:50:34 +01:00
Nick Craig-Wood
a25875170b webdav: Add another time format to fix #2574 2018-09-15 21:46:56 +01:00
Alex Chen
a288646419 onedrive: fix sometimes special chars in filenames not replaced 2018-09-14 21:38:55 +08:00
Nick Craig-Wood
b3d8bc61ac build: add example scripts for bisecting rclone and go 2018-09-14 11:17:51 +01:00
sandeepkru
7accd30da8 cmd and fs: Added new command settier which performs storage tier changes on
supported remotes
2018-09-12 21:09:08 +01:00
sandeepkru
9594fd0a0c fstests: Added integration tests on SetTier operation 2018-09-12 21:09:08 +01:00
sandeepkru
aac84c554a azureblob: Implemented settier command support on the azureblob remote, which supports
changing the tier on objects. Added an internal test to check if feature flags are set correctly
2018-09-12 21:09:08 +01:00
sandeepkru
5716a58413 fs: Added new optional interfaces SetTierer, GetTierer and ListTierer, which are used to
perform object tier changes on supported remotes
2018-09-12 21:09:08 +01:00
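A sketch of what such optional interfaces look like; the exact rclone definitions may differ, so treat the method signatures here as assumptions:

    package fs

    // SetTierer is an optional interface for objects whose storage tier
    // can be changed (e.g. Hot, Cool, Archive on supported remotes).
    type SetTierer interface {
        // SetTier changes the storage tier of the object.
        SetTier(tier string) error
    }

    // GetTierer is an optional interface for objects that can report
    // their current storage tier.
    type GetTierer interface {
        // GetTier returns the storage tier of the object.
        GetTier() string
    }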
sandeepkru
233507bfe0 vendor: Update AzureSDK version to latest one, fixes failing integration tests 2018-09-12 08:14:38 +01:00
sandeepkru
5b27702b61 AzureBlob new sdk changes 2018-09-12 08:14:38 +01:00
Nick Craig-Wood
b2a6a97443 Add Craig Miskell to contributors 2018-09-11 07:57:16 +01:00
Craig Miskell
2543278c3f S3: Use (custom) pacer, to retry operations when reasonable - fixes #2503 2018-09-11 07:57:03 +01:00
Jon Craton
19cf3bb9e7 Fixed typo (duplicate word) (#2563) 2018-09-10 19:09:48 -07:00
Fabian Möller
3c44ef788a cache: add plex_insecure option to skip certificate validation
Fixes #2215
2018-09-10 21:19:25 +01:00
Fabian Möller
e5663de09e docs: add section for .plex.direct urls to cache 2018-09-10 21:15:18 +01:00
Nick Craig-Wood
7cfd4e56f8 Add Santiago Rodríguez to contributors
Another email address for Sandeep.
2018-09-10 20:49:55 +01:00
Santiago Rodríguez
282540c2d4 azureblob: add --azureblob-list-chunk parameter - Fixes #2390
This parameter can be used to adjust the size of the listing chunks
which can be used to workaround problems listing large buckets.
2018-09-10 20:45:06 +01:00
albertony
1e7a7d756f jottacloud: fix for --fast-list 2018-09-10 20:38:20 +01:00
Fabian Möller
f6ee0795ac cache: preserve leading / in wrapped remote path
When combining the remote value and the root path, preserve the absence
or presence of the / at the beginning of the wrapped remote path.

e.g. a remote "cloud:" and root path "dir" becomes "cloud:dir" instead
of "cloud:/dir".

Fixes #2553
2018-09-10 20:35:50 +01:00
Fabian Möller
eb5a95e7de crypt: preserve leading / in wrapped remote path
When combining the remote value and the root path, preserve the absence
or presence of the / at the beginning of the wrapped remote path.

e.g. a remote "cloud:" and root path "dir" becomes "cloud:dir" instead
of "cloud:/dir".
2018-09-10 20:35:50 +01:00
Sandeep
5b1c162fb2 Added sandeepkru to maintainers list (#2560) 2018-09-08 20:58:23 -07:00
albertony
d51501938a jottacloud: add link sharing support 2018-09-08 09:38:57 +01:00
Nick Craig-Wood
a823d518ac docs: changelog from v1.43.1 branch 2018-09-07 17:16:11 +01:00
Nick Craig-Wood
87d4e32a6b build: instructions on how to make a point release 2018-09-07 17:10:29 +01:00
Nick Craig-Wood
820d2a7149 Add Felix Brucker to contributors 2018-09-07 15:14:28 +01:00
Nick Craig-Wood
9fe39f25e1 union: add missing docs 2018-09-07 15:14:08 +01:00
Nick Craig-Wood
1b2cc781e5 union: fix so all integration tests pass
* Fix error handling in List and NewObject
* Fix Precision in case we have precision > time.Second
* Fix Features - all binary features are possible
* Fix integration tests using new test facilities
2018-09-07 15:14:08 +01:00
Nick Craig-Wood
e05ec2b77e fstests: Allow object name and fs check to be skipped 2018-09-07 15:14:08 +01:00
Felix Brucker
9e3ea3c6ac union: Implement union backend which reads from multiple backends 2018-09-07 15:14:08 +01:00
Anagh Kumar Baranwal
0fb12112f5 docs: display changes
- Reduced size of the social menu and increased the size of the content
- Added scrollable property to the index menus
- Fixed code wrapping issue

Fixes #2103

Signed-off-by: Anagh Kumar Baranwal <anaghk.dos@gmail.com>
2018-09-07 14:54:22 +01:00
Cédric Connes
1b95ca2852 stats: handle FatalError and NoRetryError when reported to stats 2018-09-07 14:44:50 +01:00
albertony
e07a850be3 jottacloud: add permanent delete support: --jottacloud-hard-delete 2018-09-07 12:58:18 +01:00
albertony
3fccce625c jottacloud: add --fast-list support - fixes #2532 2018-09-07 12:49:39 +01:00
albertony
a1f935e815 jottacloud: minor improvement in quota info (omit if unlimited) 2018-09-07 12:49:39 +01:00
Alex Chen
22ee4151fd build: make CIs available for forks
This makes it possible to run CI on a fork of rclone which is useful for contributors.
2018-09-07 10:17:26 +01:00
Fabian Möller
cc23ad71ce ncdu: return error instead of log.Fatal in Show 2018-09-07 10:06:37 +01:00
sandeepkru
57b9fff904 azureblob - BugFix - Incorrect StageBlock invocation in multi-part uploads
fixes #2518. Incorrect formation of block list.
2018-09-06 22:24:40 +01:00
Alex Chen
692ad482dc onedrive: fix new fields not saved when editing old config - fixes #2527 2018-09-07 00:07:16 +08:00
Sebastian Bünger
c6f1c3c7f6 box: Implement link sharing. #2178 2018-09-04 22:01:22 +01:00
Nick Craig-Wood
164d1e05ca hubic: retry auth fetching if it fails to make hubic more reliable 2018-09-04 21:00:36 +01:00
Nick Craig-Wood
c644241392 hubic: fix uploads - fixes #2524
Uploads were broken because chunk size was set to zero.  This was a
consequence of the backend config re-organization which meant that
chunk size had lost its default.

Sharing some backend config between swift and hubic fixes the problem
and means hubic gains its own --hubic-chunk-size flag.
2018-09-04 20:27:48 +01:00
Cnly
89be5cadaa onedrive: fix make check 2018-09-05 00:37:52 +08:00
Cnly
f326f94b97 onedrive: use single-part upload for empty files - fixes #2520 2018-09-05 00:08:00 +08:00
Nick Craig-Wood
3d2117887d Add Anagh Kumar Baranwal to contributors 2018-09-04 16:16:46 +01:00
Anagh Kumar Baranwal
5a6750e1cd cache: documentation fix for cache-chunk-total-size - Fixes #2519
Signed-off-by: Anagh Kumar Baranwal <anaghk.dos@gmail.com>
2018-09-04 16:16:35 +01:00
Fabian Möller
6b8b9d19f3 googlecloudstorage: fix service_account_file been ignored - Fixes #2523 2018-09-04 15:31:20 +01:00
Nick Craig-Wood
4ca26eb38c cmd: fix crash with --progress and --stats 0 #2501 2018-09-04 14:39:48 +01:00
Alex Chen
37b2754f37 Add Alex Chen (Cnly) to MAINTAINERS.md 2018-09-04 08:40:01 +08:00
Nick Craig-Wood
172beb2ae3 Add cron410 to contributors 2018-09-03 20:28:15 +01:00
cron410
5eba392a04 cache: clarify docs 2018-09-03 20:28:15 +01:00
Fabian Möller
deda093637 cache: fix error return value of cache/fetch rc method 2018-09-03 17:32:11 +02:00
dcpu
a4c4019032 cache: improve performance by not sending info requests for cached chunks 2018-09-03 15:41:06 +01:00
Nick Craig-Wood
2e37942592 Add albertony to contributors 2018-09-03 15:31:25 +01:00
albertony
09d7bd2d40 config: don't create default config dir when user supplies --config
Avoid creating empty default configuration directory when user supplies path to config file.

Fixes #2514.
2018-09-03 15:30:53 +01:00
Nick Craig-Wood
ff0efb1501 Add Sheldon Rupp to contributors 2018-09-03 15:04:37 +01:00
Sheldon Rupp
0f1d4a7ca8 docs: fix typo 2018-09-03 15:03:49 +01:00
Fabian Möller
a0b3fd3a33 cache: fix worker scale down
Ensure that calling scaleWorkers will create/destroy the right number of
workers.
2018-09-03 12:29:35 +02:00
Fabian Möller
cdbe3691b7 cache: add cache/fetch rc function 2018-09-03 12:29:35 +02:00
Fabian Möller
3a0b3b0f6e drive: reformat long API call lines 2018-09-03 12:22:05 +02:00
Nick Craig-Wood
d3afef3e1b Add dcpu to contributors 2018-09-02 18:11:42 +01:00
dcpu
f4aaec9ce5 log: Add --log-format flag - fixes #2424 2018-09-02 18:11:09 +01:00
Nick Craig-Wood
bd5d326160 build: enable caching of the go build cache for Travis and Appveyor 2018-09-02 17:43:09 +01:00
Cnly
5b9b9f1572 onedrive: graph: clarify option for root Sharepoint site 2018-09-02 16:06:25 +01:00
Cnly
571c8754de onedrive: graph: update docs 2018-09-02 16:06:25 +01:00
Cnly
fb9a95e68e onedrive: graph: Remove unnecessary error checks 2018-09-02 16:06:25 +01:00
Cnly
85e0839c8b onedrive: graph: Refine config handling 2018-09-02 16:06:25 +01:00
Cnly
1749fb8ebf onedrive: graph: Refine config keys naming 2018-09-02 16:06:25 +01:00
Oliver Heyme
e114be11ec onedrive: Removed upload cutoff and always do session uploads
Set modtime on copy

Added versioning issue to OneDrive documentation

(cherry picked from commit 7f74403)
2018-09-02 16:06:25 +01:00
Cnly
b709f73aab onedrive: rework to support Microsoft Graph
The initial work on this was done by Oliver Heyme with updates from
Cnly.

Oliver Heyme:

* Changed to Microsoft graph
* Enable writing
* Added more options for adding a OneDrive Remote
* Better error handling
* Send modDate at create upload session and fix list children

Cnly:

* Simple upload API only supports max 4MB files
* Fix supported hash types for different drive types
* Fix unchecked err

Co-authored-by: Oliver Heyme <olihey@googlemail.com>
Co-authored-by: Cnly <minecnly@gmail.com>
2018-09-02 16:06:25 +01:00
Nick Craig-Wood
05a615ef22 Add Dr. Tobias Quathamer to contributors 2018-09-02 15:51:47 +01:00
Dr. Tobias Quathamer
76450c01f3 cache: remove accidentally committed files
* Delete cache_upload_test.go.orig
* Delete cache_upload_test.go.rej
2018-09-02 15:51:22 +01:00
Nick Craig-Wood
86e64c626c Add Cédric Connes to contributors 2018-09-02 15:08:38 +01:00
Cédric Connes
9b827be418 local: skip bad symlinks in dir listing with -L enabled - fixes #1509 2018-09-02 14:47:54 +01:00
Nick Craig-Wood
7e5c6725c1 docs: Fix version in changelog 2018-09-01 18:40:34 +01:00
Nick Craig-Wood
543d75723b Start v1.43-DEV development 2018-09-01 18:37:48 +01:00
Nick Craig-Wood
b4d94f255a build: server side copy the current release files in 2018-09-01 18:22:19 +01:00
Nick Craig-Wood
b0dd218fea build: make tidy-beta for removing old beta releases 2018-09-01 18:21:54 +01:00
Nick Craig-Wood
6396872d75 build: when building a tag release don't suffix the version 2018-09-01 16:57:34 +01:00
Nick Craig-Wood
6cf684f2a1 build: fix addition of β symbol for release build and replace with -beta
Before this fix the release build was being built with a β suffix.

This also replaces β with -beta so we can use the same version tag
everywhere as some systems don't like the β symbol.
2018-09-01 14:51:32 +01:00
Nick Craig-Wood
20c55a6829 Version v1.43 2018-09-01 12:58:00 +01:00
Nick Craig-Wood
a3fec7f030 build: build release binaries on travis and appveyor, not locally 2018-09-01 12:50:35 +01:00
Nick Craig-Wood
8e2b3268be build: Automatically compile the changelog to make editing easier 2018-09-01 12:50:35 +01:00
Nick Craig-Wood
32ab4e9ac6 pcloud: delete half uploaded files on upload error
Sometimes pcloud will leave a half uploaded file when the transfer
actually failed.  This patch deletes the file if it exists.

This problem was spotted by the integration tests.
2018-09-01 10:01:02 +01:00
Nick Craig-Wood
b4d86d5450 onedrive: fix rmdir sometimes deleting directories with contents
Before this change we were using the ChildCount in the Folder facet to
determine if a directory was empty or not.  However this seems to be
unreliable, or updated asynchronously which meant that `rclone rmdir`
sometimes deleted directories that had files in.

This problem was spotted by the integration tests.

Listing the directory instead of relying on the ChildCount fixes the
problem and the integration tests, without changing the cost (one http
transaction).
2018-08-31 23:12:13 +01:00
Nick Craig-Wood
d49ba652e2 local: fix mkdir error when trying to copy files to the root of a drive on windows
This was causing errors which looked like this when copying a file to
the root of a drive:

    mkdir \\?: The filename, directory name, or volume label syntax is incorrect.

This was caused by an incorrect path splitting routine which was
removing \ from the end of UNC paths when it shouldn't have been.  Fixed
by using the standard library `filepath.Dir` instead.
2018-08-31 21:10:36 +01:00
Nick Craig-Wood
7d74686698 fs/accounting: increase maximum burst size of token bucket
This stops occasional errors when using --bwlimit which look like this

    Token bucket error: rate: Wait(n=2255475) exceeds limiter's burst 2097152
2018-08-30 17:24:08 +01:00
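A minimal sketch of the failure mode using golang.org/x/time/rate: a single WaitN call for more tokens than the burst size fails immediately, so the burst has to be at least as big as the largest read the accounting layer will ever wait for (the sizes below are illustrative):

    package main

    import (
        "context"
        "fmt"

        "golang.org/x/time/rate"
    )

    func main() {
        // 1 MiB/s bandwidth limit with a 2 MiB burst.
        limiter := rate.NewLimiter(rate.Limit(1<<20), 2<<20)
        // Asking for more than the burst in one go returns an error
        // rather than waiting.
        err := limiter.WaitN(context.Background(), 4<<20)
        fmt.Println(err) // rate: Wait(n=4194304) exceeds limiter's burst 2097152
    }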
Sebastian Bünger
2d7c5ebc7a jottacloud: Implement optional about interface. 2018-08-30 17:15:49 +01:00
Sebastian Bünger
86e3436d55 jottacloud: Add optional MimeTyper interface. 2018-08-30 17:15:49 +01:00
Nick Craig-Wood
f243d2a309 Add bsteiss to contributors 2018-08-30 17:08:40 +01:00
bsteiss
aaa3d7e63b s3: add support for KMS Key ID - fixes #2217
This code supports aws:kms and the kms key id for the s3 backend.
2018-08-30 17:08:27 +01:00
Nick Craig-Wood
e4c5f248c0 build: make appveyor just use latest release go version 2018-08-30 16:55:02 +01:00
Nick Craig-Wood
5afaa48d06 Add Denis to contributors 2018-08-30 16:47:32 +01:00
Denis
1c578ced1c cmd: add copyurl command - Fixes #1320 2018-08-30 16:45:41 +01:00
Nick Craig-Wood
de6ec8056f fs/accounting: fix moving average speed for file stats
Before this change the moving average for the individual file stats
would start at 0 and only converge to the correct value over 15-30
seconds.

This change starts the weighting period as 1 and moves it up once per
sample which gets the average to a better value instantly.
2018-08-28 22:55:51 +01:00
Nick Craig-Wood
66fe4a2523 fs/accounting: fix stats display which was missing transferring data
Before this change the total stats were ignoring the in-flight
checks, transfers and bytes.
2018-08-28 22:21:17 +01:00
Nick Craig-Wood
f5617dadf3 fs/accounting: factor out eta and percent calculations and write tests 2018-08-28 22:21:17 +01:00
Nick Craig-Wood
5e75a9ef5c build: switch to using go1.11 modules for managing dependencies 2018-08-28 17:08:22 +01:00
Nick Craig-Wood
da1682a30e vendor: switch to using go1.11 modules 2018-08-28 16:08:48 +01:00
Nick Craig-Wood
5c75453aba version: fix test under Windows 2018-08-28 16:07:36 +01:00
Nick Craig-Wood
84ef6b2ae6 Add Alex Chen to contributors 2018-08-27 08:34:57 +01:00
Nick Craig-Wood
7194c358ad azureblob,b2,qingstor,s3,swift: remove leading / from paths - fixes #2484 2018-08-26 23:19:28 +01:00
Nick Craig-Wood
502d8b0cdd vendor: remove github.com/VividCortex/ewma dependency 2018-08-26 22:05:30 +01:00
Nick Craig-Wood
ca44fb1fba accounting: fix time to completion estimates
Previous to this change the package used for this,
github.com/VividCortex/ewma, took a 0 average to mean reset the
statistics.  This happens quite often when transferring files through a
buffer.

Replace that implementation with a simple home grown one (with about
the same constant), without that feature.
2018-08-26 22:00:48 +01:00
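A minimal sketch of a home-grown exponentially weighted moving average with roughly that behaviour: a zero sample is just another sample rather than a reset (the smoothing constant is illustrative):

    package main

    import "fmt"

    type ewma struct {
        weight float64 // smoothing constant in (0, 1]
        avg    float64
        primed bool
    }

    // Add folds a new sample into the average; the first sample seeds it.
    func (e *ewma) Add(sample float64) {
        if !e.primed {
            e.avg, e.primed = sample, true
            return
        }
        e.avg += e.weight * (sample - e.avg)
    }

    func main() {
        e := ewma{weight: 0.2}
        for _, s := range []float64{100, 0, 100, 0} { // zeros don't reset the average
            e.Add(s)
        }
        fmt.Printf("%.1f\n", e.avg)
    }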
Nick Craig-Wood
842ed7d2a9 swift: make it so just storage_url or auth_token can be overridden
Before this change if only one of storage_url or auth_token were
supplied then rclone would overwrite both of them when authenticating.
This effectively meant you could supply both of them or none of them
only.

Now rclone still does the authentication to read the missing
storage_url or auth_token then afterwards re-writes the auth_token or
storage_url back to what the user desired.

Fixes #2464
2018-08-26 21:54:50 +01:00
Nick Craig-Wood
b3217d2cac serve webdav: make Content-Type without reading the file and add --etag-hash
Before this change x/net/webdav would open each file to find out its
Content-Type.

Now we override the FileInfo and provide that directly from rclone.

An --etag-hash has also been implemented to override the ETag with the
hash passed in.

Fixes #2273
2018-08-26 21:50:41 +01:00
Nick Craig-Wood
94950258a4 fs: allow backends to be named using their Name or Prefix #2449
This means that, for example Google Cloud Storage can be known as
`:gcs:bucket` on the command line, as well as `:google cloud
storage:bucket`.
2018-08-26 17:59:31 +01:00
Nick Craig-Wood
8656bd2bb0 fs: Allow on the fly remotes with :backend: syntax - fixes #2449
This change allows remotes to be created on the fly without a config
file by using the remote type prefixed with a : as the remote name, Eg
:s3: to make an s3 remote.

This assumes the user is supplying the backend config via command line
flags or environment variables.
2018-08-26 17:59:31 +01:00
Nick Craig-Wood
174ca22936 mount,cmount: clip the number of blocks to 2^32-1 on macOS
OSX FUSE only supports 32 bit number of blocks which means that block
counts have been wrapping.  This causes f_bavail to be 0 which in turn
causes problems with programs like borg backup.

Fixes #2356
2018-08-26 17:32:59 +01:00
Nick Craig-Wood
4eefd05dcf version: print the release and beta versions with --check - Fixes #2348 2018-08-26 17:28:28 +01:00
Nick Craig-Wood
1cccfa7331 cmd: Make --progress work on Windows 2018-08-26 17:20:38 +01:00
Nick Craig-Wood
18685e6b0b vendor: add github.com/Azure/go-ansiterm for --progress on Windows support 2018-08-26 17:20:38 +01:00
Nick Craig-Wood
b6db90cc32 cmd: add --progress/-P flag to show progress
Fixes #2347
Fixes #1210
2018-08-26 17:20:38 +01:00
Nick Craig-Wood
b596ccdf0f build: update to use go1.11 for the build
Also select latest patch release for go using 1.yy.x feature in travis
2018-08-26 15:26:44 +01:00
Nick Craig-Wood
a64e0922b9 vendor: update github.com/ncw/swift to fix server side copy bug 2018-08-26 15:03:19 +01:00
sandeepkru
3751ceebdd azureblob: Added blob tier feature: a new azureblob-access-tier configuration option
is added to tier blobs between Hot, Cool and Archive. Addresses #2901
2018-08-21 21:52:45 +01:00
Nick Craig-Wood
9f671c5dd0 fs: fix tests for *SepList 2018-08-21 10:58:59 +01:00
Alex Chen
c6c74cb869 mountlib: fix mount --daemon not working with encrypted config - fixes #2473
This passes the configKey to the child process as an Obscured temporary file with an environment variable to the
2018-08-21 09:41:16 +01:00
Nick Craig-Wood
f9cf70e3aa jottacloud: docs, fixes and tests for MD5 calculation
* Add docs for --jottacloud-md5-memory-limit
  * Factor out readMD5 function and add tests
  * Fix accounting
  * Make sure temp file is deleted at the start (not Windows)
2018-08-21 08:57:53 +01:00
Oliver Heyme
ee4485a316 jottacloud: calculate missing MD5s - fixes #2462
If an MD5 can't be found on the source then this streams the object
into memory or on disk to calculate it.
2018-08-21 08:57:53 +01:00
Nick Craig-Wood
455219f501 crypt: fix accounting when checking hashes on upload
In e52ecba295 we forgot to unwrap and
re-wrap the accounting which meant that the accounting was no longer
first in the chain of readers.  This led to accounting inaccuracies
in remotes which wrap and unwrap the reader again.
2018-08-21 08:57:53 +01:00
Nick Craig-Wood
1b8f4b616c fs: move CommaSepList and SpaceSepList here from config
fs can't import config so having them there means they are not usable
by rclone core.
2018-08-20 17:52:05 +01:00
Fabian Möller
f818df52b8 config: add List type 2018-08-20 17:38:51 +01:00
Cnly
29fa840d3a onedrive: Add back the check for DirMover interface 2018-08-20 17:31:28 +01:00
Nick Craig-Wood
7712a0e111 fs/asyncreader: skip some tests to work around race detector bug
The race detector currently detects a race with len(chan) against
close(chan).

See: https://github.com/golang/go/issues/27070

Skip the tests which trip this bug under the race detector.
2018-08-20 12:34:29 +01:00
Nick Craig-Wood
77806494c8 mount,cmount: adapt to sdnotify API change 2018-08-20 12:34:29 +01:00
Nick Craig-Wood
ff8de59d2b vendor: update minimum number of packages so compile with go1.11 works 2018-08-20 12:34:29 +01:00
Nick Craig-Wood
c19d1ae9a5 build: fix whitespace changes due to go1.11 gofmt changes 2018-08-20 12:26:06 +01:00
Nick Craig-Wood
64ecc2f587 build: use go1.11rc1 to make the beta releases 2018-08-20 12:26:06 +01:00
Nick Craig-Wood
7c911bf2d6 b2: fix app key support on upload to a bucket - fixes #2428 2018-08-18 19:05:32 +01:00
Nick Craig-Wood
41f709e13b yandex: fix listing/deleting files in the root - fixes #2471
Before this change `rclone ls yandex:hello.txt` would fail whereas
`rclone ls yandex:/hello.txt` would succeed.  Now they both succeed.
2018-08-18 12:12:19 +01:00
Nick Craig-Wood
6c5ccf26b1 vendor: update github.com/t3rm1n4l/go-mega to fix failed logins - fixes #2443 2018-08-18 11:46:25 +01:00
Fabian Möller
6dc5aa7454 docs: clarify that buffer-size is per transfer/filehandle 2018-08-17 18:11:40 +01:00
Fabian Möller
552eb8e06b vfs: try to seek buffer on read only files 2018-08-17 18:10:28 +01:00
Nick Craig-Wood
7d35b14138 Add Martin Polden to contributors 2018-08-17 18:08:48 +01:00
Martin Polden
6199b95b61 jottacloud: Handle empty time values 2018-08-17 18:08:29 +01:00
Oliver Heyme
040768383b jottacloud: fix MD5 error check 2018-08-17 18:05:04 +01:00
Nick Craig-Wood
6390dec7db fs/accounting: add --stats-one-line flag for single line stats 2018-08-17 17:58:00 +01:00
Nick Craig-Wood
80a3db34a8 fs/accounting: show the total progress of the sync in the stats #379 2018-08-17 17:58:00 +01:00
Nick Craig-Wood
cb7a461287 sync: add a buffer for checks, uploads and renames #379
--max-backlog controls the queue length.

Add statistics for the check/upload/rename queues.

This means that checking can complete before the uploads which will
give rclone the ability to show exactly what is outstanding.
2018-08-17 17:58:00 +01:00
Nick Craig-Wood
eb84b58d3c webdav: Attempt to remove failed uploads
Some webdav backends (eg rclone serve webdav) leave behind half
written files on error.  This causes the integration tests to
fail. Here we remove the file if it exists.
2018-08-16 16:00:30 +01:00
Nick Craig-Wood
58339a5cb6 fstests: In TestFsPutError reliably provoke test failure
This change to go1.11 causes the TestFsPutError test to fail

https://go-review.googlesource.com/c/go/+/114316

This is because it now passes the half written file to the backend
whereas it didn't previously because of the buffering.

In this commit the size of the data written was increased to 5k from
50 bytes to provoke the test failure under go1.10 also.
2018-08-16 15:52:15 +01:00
Nick Craig-Wood
751bfd456f box: make --box-commit-retries flag default to 100 - Fixes #2054
Sometimes it takes many more commit retries than expected to commit a
multipart file, so split this number into its own config variable and
default it to 100 which should always be enough.
2018-08-11 16:33:55 +01:00
Andres Alvarez
990919f268 Add disclaimer about generated passwords being stored in an obscured format 2018-08-11 15:07:50 +01:00
Nick Craig-Wood
6301e15b79 Add Sebastian Bünger to contributors 2018-08-10 11:15:04 +01:00
Sebastian Bünger
007c7757d4 Add docs for Jottacloud 2018-08-10 11:14:34 +01:00
Sebastian Bünger
dd3e912731 fs/OpenOptions: Make FixRangeOption clamp range to filesize. 2018-08-10 11:14:34 +01:00
Sebastian Bünger
10ed455777 New backend: Jottacloud 2018-08-10 11:14:34 +01:00
Nick Craig-Wood
05bec70c3e Add Matt Tucker to contributors 2018-08-10 10:28:41 +01:00
Matt Tucker
c54f5a781e Fix typo in Box documentation 2018-08-10 10:28:16 +01:00
Nick Craig-Wood
6156bc5806 cache: fix nil pointer deref - fixes #2448 2018-08-07 21:33:13 +01:00
Nick Craig-Wood
e979cd62c1 rc: fix formatting in docs 2018-08-07 21:05:21 +01:00
Nick Craig-Wood
687477b34d rc: add core/stats and vfs/refresh to the docs 2018-08-07 20:58:00 +01:00
Nick Craig-Wood
40d383e223 Add reddi1 to contributors 2018-08-07 20:56:55 +01:00
reddi1
6bfdabab6d rc: added core/stats to return the stats - fixes #2405 2018-08-07 20:56:40 +01:00
Nick Craig-Wood
f7c1c61dda Add Andres Alvarez to contributors 2018-08-07 20:52:04 +01:00
Andres Alvarez
c1f5add049 Add tests for reveal functions 2018-08-07 20:51:50 +01:00
Andres Alvarez
8989c367c4 Add reveal command 2018-08-07 20:51:50 +01:00
Nick Craig-Wood
d95667b06d Add Cnly to contributors 2018-08-07 09:33:25 +01:00
Cnly
0f845e3a59 onedrive: implement DirMove - fixes #197 2018-08-07 09:33:19 +01:00
Fabian Möller
2e80d4c18e vfs: update vfs/refresh rc command documentation 2018-08-07 09:31:12 +01:00
Fabian Möller
6349147af4 vfs: add non recursive mode to vfs/refresh rc command 2018-08-07 09:31:12 +01:00
Fabian Möller
782972088d vfs: add the vfs/refresh rc command
vfs/refresh will walk the directory tree for the given paths and
freshen the directory cache. It will use the fast-list capability
of the remote when enabled.
2018-08-07 09:31:12 +01:00
Fabian Möller
38381d3786 lsjson: add option to show the original object IDs 2018-08-07 09:28:55 +01:00
Fabian Möller
eb6aafbd14 cache: implement fs.ObjectUnWrapper 2018-08-07 09:28:55 +01:00
Nick Craig-Wood
7f3d5c31d9 Add Ruben Vandamme to contributors 2018-08-06 22:07:40 +01:00
Ruben Vandamme
578f56bba7 Swift: Add storage_policy 2018-08-06 22:07:25 +01:00
Nick Craig-Wood
f7c0b2407d drive: add docs for --fast-list and add to integration tests 2018-08-06 21:38:50 +01:00
Fabian Möller
dc5a734522 drive: implement ListR 2018-08-06 21:31:47 +01:00
Nick Craig-Wood
3c2ffa7f57 Add Oleg Kovalov to contributors 2018-08-06 21:14:14 +01:00
Oleg Kovalov
06c9f76cd2 all: fix go-critic linter suggestions 2018-08-06 21:14:03 +01:00
Nick Craig-Wood
44abf6473e Add dan smith to contributors 2018-08-06 21:08:37 +01:00
dan smith
b99595b7ce docs: remove references to copy and move for --track-renames
this change was omitted in the fix for #2008
2018-08-06 21:08:19 +01:00
Nick Craig-Wood
a119ca9f10 b2: Support Application Keys - fixes #2428
This supports B2 application keys limited to a bucket by making sure
we only list the buckets of the bucket ID that the key is limited to.
2018-08-06 14:32:53 +01:00
Nick Craig-Wood
ffd11662ba cache: fix nil pointer deref when using lsjson on cached directory
This stops embedding the fs.Directory into the cache.Directory because
it can be nil and implements an ID method which checks the nil status.

See: https://forum.rclone.org/t/runtime-error-with-cache-remote-and-lsjson/6305
2018-08-05 09:42:31 +01:00
Henning
1f3778dbfb webdav: sharepoint recursion with different depth - fixes #2426
This change adds the depth parameter to listAll and readMetaDataForPath.
This allows recursive calls of these methods with a different depth
header.

Sharepoint won't list files if the depth header is != 0. If that is the
case, it will just return an error 404 even though the file exists.
Since it is not possible to determine if a path should be a file or a
directory, rclone has to make a request with depth = 1 first. On success
we are sure that the path is a directory and the listing will work.
If this request returns error 404, the path either doesn't exist or it
is a file.

To be sure, we can try again with depth set to 0. If it still fails, the
path really doesn't exist, else we found our file.
2018-08-04 11:02:47 +01:00
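A minimal sketch of that probing order using net/http (the function names and the bare status-code handling are assumptions, not rclone's webdav backend):

    package webdavprobe

    import "net/http"

    // propfind issues a PROPFIND with the given Depth header and returns the
    // HTTP status code.
    func propfind(c *http.Client, url, depth string) (int, error) {
        req, err := http.NewRequest("PROPFIND", url, nil)
        if err != nil {
            return 0, err
        }
        req.Header.Set("Depth", depth)
        resp, err := c.Do(req)
        if err != nil {
            return 0, err
        }
        resp.Body.Close()
        return resp.StatusCode, nil
    }

    // stat tries Depth: 1 first (which only succeeds for directories on
    // Sharepoint) and falls back to Depth: 0 before declaring the path missing.
    func stat(c *http.Client, url string) (found, isDir bool, err error) {
        code, err := propfind(c, url, "1")
        if err != nil {
            return false, false, err
        }
        if code != http.StatusNotFound {
            return true, true, nil
        }
        code, err = propfind(c, url, "0")
        if err != nil {
            return false, false, err
        }
        return code != http.StatusNotFound, false, nil
    }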
Nick Craig-Wood
f9eb9c894d mega: add --mega-hard-delete flag - fixes #2409 2018-08-03 15:07:51 +01:00
Nick Craig-Wood
f7a92f2c15 Add Andrew to contributors 2018-08-03 13:00:25 +01:00
Andrew
42959fe9c3 Swap cache-db-path and cache-chunk-path
Have the db path option come first in the docs, as the chunk path references the db path and isn't needed if the preceding (db path) option is used.
2018-08-03 13:00:13 +01:00
Nick Craig-Wood
f72eade707 box: Fix upload of > 2GB files on 32 bit platforms
Before this change the Part structure had an int for the Offset and
uploading large files would produce this error

    json: cannot unmarshal number 2147483648 into Go struct field Part.offset of type int

Changing the field to an int64 fixes the problem.
2018-07-31 10:33:55 +01:00
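A minimal sketch of the fix (the struct fields other than Offset are assumptions about the Box API): decoding the offset into an int64 avoids the overflow that a platform-sized int hits on 32 bit builds:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type Part struct {
        PartID string `json:"part_id"`
        Offset int64  `json:"offset"` // was int, which cannot hold 2147483648 on 32 bit
        Size   int64  `json:"size"`
    }

    func main() {
        var p Part
        data := []byte(`{"part_id":"1","offset":2147483648,"size":8388608}`)
        if err := json.Unmarshal(data, &p); err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(p.Offset) // 2147483648
    }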
Nick Craig-Wood
bbda4ab1f1 Add HerrH to contributors 2018-07-30 23:14:33 +01:00
HerrH
916b6e3a40 b2: Use create instead of make in the docs 2018-07-30 23:14:03 +01:00
Fabian Möller
dd670ad1db drive: handle gdocs when filtering file names in list
Fixes #2399
2018-07-30 13:01:16 +01:00
Fabian Möller
7983b6bdca vfs: enable vfs-read-chunk-size by default 2018-07-29 18:17:05 +01:00
Fabian Möller
9815b09d90 fs: add multipliers for SizeSuffix 2018-07-29 18:17:05 +01:00
Fabian Möller
9c90b5e77c stats: use appropriate Lock func's 2018-07-22 11:33:19 +02:00
Nick Craig-Wood
01af8e2905 s3: docs for how to configure Aliyun OSS / Netease NOS - thanks @xiaolei0125 2018-07-20 15:49:07 +01:00
Nick Craig-Wood
f06ba393b8 s3: Add --s3-force-path-style - fixes #2401 2018-07-20 15:41:40 +01:00
Nick Craig-Wood
473e3c3eb8 mount/cmount: implement --daemon-timeout flag for OSXFUSE
By default the timeout is 60s which isn't long enough for long
transactions.  The symptoms are rclone just quitting for no reason.
Supplying the --daemon-timeout flag fixes this causing the kernel to
wait longer for rclone.
2018-07-19 13:26:51 +01:00
Nick Craig-Wood
ab78eb13e4 sync: correct help for --delete-during and --delete-after 2018-07-18 19:30:14 +01:00
Nick Craig-Wood
b1f31c2acf cmd: fix boolean backend flags - fixes #2402
Before this change, boolean flags such as `--b2-hard-delete` were
failing to be recognised unless they had a parameter.

This bug was introduced as part of the config re-organisation:
f3f48d7d49
2018-07-18 15:43:57 +01:00
ishuah
dcc74fa404 move: fix delete-empty-src-dirs flag to delete all empty dirs on move - fixes #2372 2018-07-17 10:34:34 +01:00
Nick Craig-Wood
6759d36e2f vendor: get Gopkg.lock back in sync 2018-07-16 22:02:11 +01:00
Nick Craig-Wood
a4797014c9 local: fix crash when deprecated --local-no-unicode-normalization is supplied 2018-07-16 21:38:34 +01:00
Nick Craig-Wood
4d7d240c12 config: Add advanced section to the config editor 2018-07-16 21:20:47 +01:00
Nick Craig-Wood
d046402d80 config: Make sure Required values are entered 2018-07-16 21:20:47 +01:00
Nick Craig-Wood
9bdf465c10 config: make config wizard understand types and defaults 2018-07-16 21:20:47 +01:00
Nick Craig-Wood
f3f48d7d49 Implement new backend config system
This unifies the 3 methods of reading config

  * command line
  * environment variable
  * config file

And allows them all to be configured in all places.  This is done by
making the []fs.Option in the backend registration be the master
source of what the backend options are.

The backend changes are:

  * Use the new configmap.Mapper parameter
  * Use configstruct to parse it into an Options struct
  * Add all config to []fs.Option including defaults and help
  * Remove all uses of pflag
  * Remove all uses of config.FileGet
2018-07-16 21:20:47 +01:00
Nick Craig-Wood
3c89406886 config: Make fs.ConfigFileGet return an exists flag 2018-07-16 08:50:52 +01:00
Nick Craig-Wood
85d09729f2 fs: factor OptionToEnv and ConfigToEnv into fs 2018-07-16 08:50:52 +01:00
Nick Craig-Wood
b3bd2d1c9e config: add configstruct parser to parse maps into config structures 2018-07-16 08:50:52 +01:00
Nick Craig-Wood
4c586a9264 config: add configmap package to manage config in a generic way 2018-07-16 08:50:52 +01:00
Nick Craig-Wood
1c80e84f8a fs: Implement Scan method for SizeSuffix and Duration 2018-07-16 08:50:52 +01:00
Nick Craig-Wood
028f8a69d3 acd: Make very clear in the docs that rclone has no ACD keys #2385 2018-07-15 14:21:19 +01:00
Nick Craig-Wood
b0d1fa1d6b azblob: fix precedence error on testing for StorageError types 2018-07-15 13:56:52 +01:00
Nick Craig-Wood
dbb4b2c900 fs/config: don't print errors about --config if supplied - fixes #2397
Before this change if the rclone was running in an environment which
couldn't find the HOME directory, it would print a warning about
supplying a --config flag even if the user had done so.
2018-07-15 12:39:11 +01:00
Nick Craig-Wood
99201f8ba4 Add sandeepkru to contributors 2018-07-14 10:50:58 +01:00
sandeepkru
5ad8bcb43a backend/azureblob: Port new Azure Blob Storage SDK #2362
This change includes removing the older azureblob storage SDK, and bringing
the existing code to parity with the latest blob storage SDK.
This change is also a pre-requisite for addressing #2091
2018-07-14 10:49:58 +01:00
sandeepkru
6efedc4043 vendor: Port new Azure Blob Storage SDK #2362
Removed references to the older sdk and added the new version of the sdk (2018-03-28)
2018-07-14 10:49:58 +01:00
Nick Craig-Wood
a3d9a38f51 fs/fserrors: make sure Cause never returns nil 2018-07-13 10:31:40 +01:00
Yoni Jah
b1bd17a220 onedrive: shared folder support - fixes #1200 2018-07-11 18:48:59 +01:00
Nick Craig-Wood
793f594b07 gcs: fix index out of range error with --fast-list fixes #2388 2018-07-09 17:00:52 +01:00
Nick Craig-Wood
4fe6614ae1 s3: fix index out of range error with --fast-list fixes #2388 2018-07-09 17:00:52 +01:00
Nick Craig-Wood
4c2fbf9b36 Add Jasper Lievisse Adriaanse to contributors 2018-07-08 11:01:56 +01:00
Jasper Lievisse Adriaanse
ed4f1b2936 sftp: fix typo in help text 2018-07-08 11:01:35 +01:00
Nick Craig-Wood
144c1a04d4 fs: Fix parsing of paths under Windows - fixes #2353
Before this copyto would parse windows paths incorrectly.

This change moves the parsing code into fspath and makes sure
fspath.Split calls fspath.Parse, which does the parsing correctly for Windows paths.

This also renames fspath.RemoteParse to fspath.Parse for consistency
2018-07-06 23:16:43 +01:00
Nick Craig-Wood
25ec7f5c00 Add Onno Zweers to contributors 2018-07-05 10:05:24 +01:00
Onno Zweers
b15603d5ea webdav: document dCache and Macaroons 2018-07-05 10:04:57 +01:00
Nick Craig-Wood
71c974bf9a azureblob: documentation for authentication methods 2018-07-05 09:39:06 +01:00
Nick Craig-Wood
03c5b8232e Update github.com/Azure/azure-sdk-for-go #2118
This pulls in https://github.com/Azure/azure-sdk-for-go/issues/2119
which fixes the SAS URL support.
2018-07-04 09:25:13 +01:00
Nick Craig-Wood
72392a2d72 azureblob: list the container to see if it exists #2118
This means that SAS URLs which are tied to a single container will work.
2018-07-04 09:23:00 +01:00
Nick Craig-Wood
b062ae9d13 azureblob: add connection string and SAS URL auth - fixes #2118 2018-07-04 09:22:59 +01:00
Nick Craig-Wood
8c0335a176 build: fix for goimports format change
See https://github.com/golang/go/issues/23709
2018-07-03 22:33:15 +01:00
Nick Craig-Wood
794e55de27 mega: wait for events instead of arbitrary sleeping 2018-07-02 14:50:09 +01:00
Nick Craig-Wood
038ed1aaf0 vendor: update github.com/t3rm1n4l/go-mega - fixes #2366
This update fixes files being missing from mega directory listings.
2018-07-02 14:50:09 +01:00
Nick Craig-Wood
97beff5370 build: keep track of compile failures better in cross-compile 2018-07-02 10:09:18 +01:00
Nick Craig-Wood
b9b9bce0db ftp: fix Put mkParentDir failed: 521 for BunnyCDN - fixes #2363
According to RFC 959, error 521 is the correct error return to mean
"dir already exists", so add support for this.
2018-06-30 14:29:47 +01:00
Nick Craig-Wood
947e10eb2b config: fix error reading password from piped input - fixes #1308 2018-06-28 11:54:15 +01:00
Nick Craig-Wood
6b42421374 build: build macOS beta releases with native compiler on travis #2309 2018-06-26 09:39:44 +01:00
Nick Craig-Wood
fa051ff970 webdav: add bearer token (Macaroon) support for dCache - fixes #2360 2018-06-25 17:54:36 +01:00
Nick Craig-Wood
69164b3dda build: move non master beta builds into branch subdirectory 2018-06-25 16:49:04 +01:00
Nick Craig-Wood
935533e57f filter: raise --include and --exclude warning to ERROR so it appears without -v 2018-06-22 22:18:55 +01:00
Nick Craig-Wood
1550f70865 webdav: Don't accept redirects when reading metadata #2350
Go can't redirect PROPFIND requests properly, it changes the method to
GET, so we disable redirects when reading the metadata and assume the
object does not exist if we receive a redirect.

This is to work-around the qnap redirecting requests for directories
without /.
2018-06-18 12:22:13 +01:00
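A minimal sketch of disabling redirect following in net/http so a redirected PROPFIND surfaces its 3xx status to the caller instead of being replayed as a GET (the URL is illustrative):

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        client := &http.Client{
            // Return the redirect response itself rather than following it.
            CheckRedirect: func(req *http.Request, via []*http.Request) error {
                return http.ErrUseLastResponse
            },
        }
        req, err := http.NewRequest("PROPFIND", "http://example.com/dir", nil)
        if err != nil {
            panic(err)
        }
        req.Header.Set("Depth", "1")
        resp, err := client.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.StatusCode) // a 3xx here is treated as "object does not exist"
    }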
Nick Craig-Wood
1a65c3a740 rest: add NoRedirect flag to Options 2018-06-18 12:21:50 +01:00
Nick Craig-Wood
a29a1de43d webdav: if root ends with / then don't check if it is a file 2018-06-18 12:13:47 +01:00
Nick Craig-Wood
e7ae5e8ee0 webdav: ensure we call MKCOL with a URL with a trailing / #2350
This is an attempt to fix rclone and qnap interop.
2018-06-18 11:16:58 +01:00
Mateusz
56e1e82005 fs: added weekday schedule into --bwlimit - fixes #1822 2018-06-17 18:38:09 +01:00
lewapm
8442498693 backend/drive: add flag for keep revision forever - fixes #1525 2018-06-17 18:34:35 +01:00
Nick Craig-Wood
08021c4636 vendor: update all dependencies 2018-06-17 17:59:12 +01:00
Nick Craig-Wood
3f0789e2db deletefile: fix typo in docs 2018-06-17 16:58:37 +01:00
Nick Craig-Wood
7110349547 Start v1.42-DEV development 2018-06-16 21:25:58 +01:00
Nick Craig-Wood
a9adb43896 Version v1.42 2018-06-16 18:21:09 +01:00
Nick Craig-Wood
c47a4c9703 opendrive: re-read hash when updating objects
Previously this was reading a stale hash from the object leading to
broken integration tests.

This fixes these integration tests TestSyncDoesntUpdateModtime,
TestSyncAfterChangingFilesSizeOnly, TestSyncAfterChangingContentsOnly,
TestSyncWithUpdateOlder, TestSyncUTFNorm.
2018-06-15 14:50:17 +01:00
Nick Craig-Wood
d9d00a7dd7 rcat: remove --checksum flag from the docs as it is not usually effective 2018-06-14 16:15:54 +01:00
Nick Craig-Wood
b82e66daaa Add themylogin to contributors 2018-06-14 16:15:53 +01:00
themylogin
7d2861ead6 Adjust S3 upload concurrency with --s3-upload-concurrency 2018-06-14 16:15:17 +01:00
remusb
aaa8591661 cache: add non cached dirs on notifications - #2155 2018-06-13 23:57:26 +03:00
remusb
4df1794932 cache: fix panic when running without plex configs 2018-06-13 15:06:14 +03:00
Nick Craig-Wood
d18928962c dropbox: make dropbox for business folders accessible #2003
Paths prefixed with / on a dropbox for business plan will now start at
the root instead of the user's home directory.
2018-06-13 11:03:34 +01:00
remusb
339fbf0df5 cache: reconnect plex websocket on failures 2018-06-12 22:58:15 +03:00
remusb
13ccb39819 cache: allow root to be expired from rc - #2237 2018-06-12 22:19:03 +03:00
remusb
f9a1a7e700 cache: fix root folder caching 2018-06-10 21:54:20 +03:00
Nick Craig-Wood
1c75581959 sync: fix TestCopyRedownload after ModifyWindow changes #2310 2018-06-10 17:34:00 +01:00
Nick Craig-Wood
4d793b8ee8 drive: remove part of workaround for #1675
Now that https://issuetracker.google.com/issues/64468406 has been
fixed, we can remove part of the workaround which fixed #1675 -
019adc35609c2136

This will make queries marginally more efficient.  We still need the
other part of the workaround since the `=` operator is case
insensitive.
2018-06-10 15:28:32 +01:00
Nick Craig-Wood
9289aead9b drive: Add --drive-acknowledge-abuse to download flagged files - fixes #2317
Also if rclone gets the cannotDownloadAbusiveFile suggest using the
--drive-acknowledge-abuse flag.
2018-06-10 15:25:21 +01:00
Filip Bartodziej
ce109ed9c0 log: password prompt output fixed for unix - partially fixes #2220 2018-06-10 12:57:45 +01:00
Filip Bartodziej
d7ac4ca44e cmd: deletefile command - fixes #2286 2018-06-10 12:49:33 +01:00
Nick Craig-Wood
1053d7e123 local: fix symlink/junction point directory handling under Windows
Before this commit rclone's handling of symlinks and junction points
under Windows was broken.  rclone treated them as files and attempted
to transfer them which gave the error "The handle is invalid".

Ultimately the cause of this was 3e43ff7414 which was a
workaround so files with reparse points (which are a kind of symlink)
would transfer correctly.

The solution implemented is to revert the above commit which will mean
that #614 will break again.  However there is now a work-around (which
will be signaled by rclone) to use the -L flag which wasn't available
when the original commit was made.

Fixes #2336
2018-06-10 12:25:03 +01:00
Nick Craig-Wood
017297af70 s3: Fix --s3-chunk-size which was always using the minimum - fixes #2345 2018-06-10 12:22:30 +01:00
remusb
4e8e5fed7d cache: clean remaining empty folders from temp upload path 2018-06-09 14:52:31 +03:00
remusb
c0f772bc14 cache: update internal tests and small fixes 2018-06-08 23:34:38 +03:00
Nick Craig-Wood
334ef28012 Add Benjamin Joseph Dag to contributors 2018-06-08 16:12:57 +01:00
Benjamin Joseph Dag
da45dadfe9 cmd: added --retries-sleep flag
The --retries-sleep flag can be used to sleep after each retry.
2018-06-08 16:12:24 +01:00
Nick Craig-Wood
05edb5f501 drive: Fix change list polling with team drives - fixes #2330
In the drive v3 conversion we forgot the IncludeTeamDriveItems
parameter when calling the changes API.  Adding it fixes the changes
polling with team drives.
2018-06-07 11:35:55 +01:00
Henning Surmeier
04d18d2a07 oauthutil: Use go template for web response
Every response is formed using the AuthResponseData struct together with
the AuthResponse html template.
2018-06-06 09:54:21 +01:00
Henning Surmeier
f1269dc06a onedrive: errorHandler for business requests
This implementation hopefully can handle all error requests from the
onedrive for business authentication.
I have only tested it with the "domain in unmanaged state" error.
2018-06-06 09:54:21 +01:00
Henning Surmeier
c5286ee157 oauthutil: support backend-specific errorHandler
This allows the backend to pass an errorHandler function to the doConfig
function. The webserver will pass the current request as a parameter to
the function.
The function can then examine all parameters and build the AuthError
struct which contains name, code, description of the error. A link to
the docs can be added to the HelpURL field.
oauthutil then takes care of formatting for the HTML response page. The
error details are also returned as an error in the server.err channel
and will be logged to the commandline.
2018-06-06 09:54:21 +01:00
Nick Craig-Wood
ba43acb6aa sync: fix TestCopyEmptyDirectories after ModifyWindow changes #2310 2018-06-04 21:41:25 +01:00
remusb
8a84975993 cache: cache lists using batch writes 2018-06-04 21:04:45 +03:00
ishuah
d758e1908e copy: create (pseudo copy) empty source directories to destination - fixes #1837 2018-06-04 11:01:14 +01:00
ishuah
737aed8412 Ensure items in srcEmptyDirs are actually empty 2018-06-04 11:01:14 +01:00
Stefan
4009fb67c8 fs: calculate ModifyWindow each time on the fly instead of relying on global state - see #2319, #2328 2018-06-03 20:45:34 +02:00
Nick Craig-Wood
3ef938ebde lsf: add --absolute flag to add a leading / onto path names 2018-06-03 10:42:34 +01:00
Nick Craig-Wood
5302e5f9b1 docs: add a note about SIGINFO for macOS 2018-06-02 17:38:05 +01:00
kubatasiemski
de8c7d8e45 cmd: add siginfo handler 2018-06-02 17:35:13 +01:00
Henning Surmeier
2a29f7f6c8 onedrive: Add troubleshooting to docs 2018-06-02 17:10:58 +01:00
Nick Craig-Wood
2b332bced2 Add Kasper Byrdal Nielsen to contributors 2018-05-31 09:42:36 +01:00
Kasper Byrdal Nielsen
aad75e6720 check: Add one-way argument
--one-way argument will check that all files on the source match the files on the destination,
but not the other way. For example files present on destination but not on source will not
trigger an error.

Fixes: #1526
2018-05-31 09:42:16 +01:00
Stefan
2a806a8d8b mount: only print "File.rename error" if there actually is an error - see #2130 (#2322) 2018-05-29 19:19:17 +02:00
Nick Craig-Wood
500085d244 vendor: update github.com/dropbox/dropbox-sdk-go-unofficial #2158 2018-05-29 15:56:40 +01:00
Nick Craig-Wood
3d8e529441 rc: return error from remote on failure 2018-05-29 10:48:01 +01:00
Stefan
6607d8752c mountlib: add testcase to ensure the ModifyWindow is calculated on Mount (see #2002) (#2319) 2018-05-28 17:49:26 +02:00
Stefan
67e9ef4547 mount: delay rename if file has open writers instead of failing outright - fixes #2130 (#2249) 2018-05-24 20:45:11 +02:00
Nick Craig-Wood
d4213c0ac5 sftp: Fix slow downloads for long latency connections - fixes #1158
This was caused by using the sftp.File.Read method which resets the
streaming window after each call.  Replacing it with sftp.File.WriteTo
and an io.Pipe fixes the problem bringing the speed to the same as the
sftp binary.
2018-05-24 15:10:28 +01:00
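A minimal sketch of the WriteTo + io.Pipe pattern, assuming an already connected github.com/pkg/sftp client; File.WriteTo streams with a large window and the pipe turns that push-style copy back into an io.ReadCloser:

    package sftpstream

    import (
        "io"

        "github.com/pkg/sftp"
    )

    // openStream returns a reader that streams path from the SFTP server
    // using the fast WriteTo path instead of repeated Read calls.
    func openStream(client *sftp.Client, path string) (io.ReadCloser, error) {
        f, err := client.Open(path)
        if err != nil {
            return nil, err
        }
        pr, pw := io.Pipe()
        go func() {
            _, err := f.WriteTo(pw) // streams the whole file into the pipe
            _ = f.Close()
            _ = pw.CloseWithError(err) // propagate any error to the reader
        }()
        return pr, nil
    }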
Nick Craig-Wood
3a2248aa5f rc: add core/gc to run a garbage collection on demand 2018-05-24 15:10:28 +01:00
Nick Craig-Wood
573ef4c8ee rc: enable go profiling by default on the --rc port
This means you can use the pprof tool on a running rclone, eg

    go tool pprof http://localhost:5572/debug/pprof/heap
2018-05-24 15:10:28 +01:00
Nick Craig-Wood
7bf2d389a8 Add John Clayton to contributors 2018-05-22 11:48:20 +01:00
John Clayton
71b4f1ccab cache: use secure websockets for HTTPS Plex addresses 2018-05-22 11:47:57 +01:00
Nick Craig-Wood
e5ff375948 Use config.FileGet instead of fs.ConfigFileGet 2018-05-22 09:43:24 +01:00
Nick Craig-Wood
512f4b4487 Update error checking on fmt.Fprint* after errcheck update
Now we need to check or ignore errors on fmt.Fprint* explicitly -
previously errcheck just ignored them for us.
2018-05-22 09:41:13 +01:00
Nick Craig-Wood
a38f8b87ce docs: fix Nextcloud typo spotted by Eugene Mlodik 2018-05-16 16:43:52 +01:00
Nick Craig-Wood
9697754707 drive: Don't attempt to choose Team Drives when using rclone config create 2018-05-16 09:10:09 +01:00
Nick Craig-Wood
8e625e0bc3 config: add ConfirmWithDefault to change the default on AutoConfig 2018-05-16 09:09:41 +01:00
Nick Craig-Wood
e52ecba295 crypt: check the crypted hash of files when uploading #2303
This checks the checksum of the streamed encrypted data against the
checksum of the encrypted object returned from the remote and returns
an error if it is different.
2018-05-15 14:50:36 +01:00
Nick Craig-Wood
e62d2fd309 oauthutil: Fix custom redirect URL message - fixes #2306 2018-05-13 17:28:09 +01:00
Nick Craig-Wood
e56be0dfd8 lsf: Add --csv flag for compliant CSV output 2018-05-13 12:18:21 +01:00
Nick Craig-Wood
2a32e2d838 operations: turn ListFormatted into a Format method on ListFormat 2018-05-13 12:17:55 +01:00
Nick Craig-Wood
db4c206e0e lsjson: add MimeType to the output 2018-05-13 12:17:55 +01:00
Nick Craig-Wood
f77efc7649 lsf: Add 'm' format specifier to show the MimeType 2018-05-13 12:17:55 +01:00
Nick Craig-Wood
aadbcce486 fs: Add MimeTypeDirEntry to return the MimeType of a DirEntry 2018-05-13 12:17:55 +01:00
Nick Craig-Wood
f162116132 lsjson: add ID field to output to show Object ID - fixes #1901 2018-05-13 12:17:55 +01:00
Nick Craig-Wood
909c3a92d6 lsf: implement 'i' format for showing object ID - fixes #1476 2018-05-13 12:17:55 +01:00
Nick Craig-Wood
826975c341 fs: add Optional ID() method to Object and implement it in backends
ID() shows the internal ID of the Object if available.
2018-05-13 12:17:55 +01:00
Fabian Möller
6791cf7d7f atexit: prevent Run from being called on nil signal 2018-05-12 18:59:25 +02:00
Fabian Möller
d022c81d99 mount: ensure atexit gets run on interrupt
When running `rclone mount`, there were 2 signal handlers for `os.Interrupt`.

Those handlers would run concurrently and in some cases cause either the unmount or `atexit.Run()` to be skipped.

In addition `atexit.Run()` will get called in `resolveExitCode` to ensure cleanup on errors.
2018-05-12 10:40:44 +01:00
Nick Craig-Wood
cdde8fa75a opendrive: finish off #1026
* Fix errcheck and golint warnings
  * Remove unused constants and fix comments
  * Parse error responses properly
  * Fix Open with RangeOption
  * Fix Move, Copy and DirMove
  * Implement DirCacheFlush
  * Check interfaces are correct
  * Remove debugs and update overview
  * Correct feature flags
  * Pare replacement characters down to the minimum set
  * Add to the integration tests
2018-05-12 10:10:46 +01:00
Nick Craig-Wood
5ede6f6d09 Add Jakub Karlicek to contributors 2018-05-12 10:10:45 +01:00
Jakub Karlicek
53292527bb opendrive: fill out the functionality #1026
* Add Mkdir, Rmdir, Purge, Delete, SetModTime, Copy, Move, DirMove
 * Update file size after upload
 * Add Open seek
 * Set private permission for new folder and uploaded file
 * Add docs
 * Update List function
 * Fix UserSessionInfo struct
 * Fix socket leaks
 * Don’t close resp.Body in Open method
 * Get hash when listing files
2018-05-12 10:07:25 +01:00
Oliver Heyme
ec9894da07 opendrive: initial parts with download and upload working #1026 2018-05-12 10:07:16 +01:00
Nick Craig-Wood
ad02d1be3f fstest: update comments on how to run individual tests 2018-05-11 14:04:36 +01:00
Nick Craig-Wood
63f413f477 webdav: show all available information when printing errors 2018-05-11 08:43:53 +01:00
Nick Craig-Wood
f1ffe8e309 fstests: fix test crash if NewFs fails 2018-05-11 08:43:53 +01:00
Nick Craig-Wood
d85b9bc9d6 webdav: workarounds for biz.mail.ru
* Add "Depth: 1" on read metadata PROPFIND call
  * Accept 406 to mean directory already exists
2018-05-11 08:43:53 +01:00
Nick Craig-Wood
b07e51cf73 webdav: read the body of messages into the error if XML parse fails 2018-05-11 08:43:53 +01:00
Nick Craig-Wood
f073db81b1 drive: add --drive-alternate-export to fix large doc export - fixes #2243
The official drive APIs seem to have trouble downloading large
documents sometimes.

This commit adds a --drive-alternate-export flag to use a different,
unofficial set of export URLs which seem to download large files OK.
2018-05-10 10:04:39 +01:00
Nick Craig-Wood
9698a2babb gcs: low level retry all operations if necessary
Google cloud storage doesn't normally need retries, however certain
things (eg bucket creation and removal) are rate limited and do
generate 429 errors.

Before this change the integration tests would regularly blow up with
errors from GCS rate limiting bucket creation and removal.

After this change we low level retry all operations using the same
exponential backoff strategy as used in the google drive backend.
2018-05-10 09:24:09 +01:00
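A minimal sketch of low level retries with exponential backoff; this is a generic helper under assumed limits, not rclone's actual pacer:

    package retry

    import (
        "math/rand"
        "time"
    )

    // withBackoff retries op until it succeeds, asks not to be retried, or the
    // attempt budget is exhausted, sleeping with jitter and doubling the delay
    // between attempts (useful for 429 rate limit responses).
    func withBackoff(attempts int, sleep time.Duration, op func() (retry bool, err error)) error {
        var err error
        for i := 0; i < attempts; i++ {
            var again bool
            again, err = op()
            if err == nil || !again {
                return err
            }
            time.Sleep(time.Duration(rand.Int63n(int64(sleep) + 1))) // full jitter
            sleep *= 2
        }
        return err
    }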
Nick Craig-Wood
5eecbd83ee bin: make make_test_files.go work properly on Windows 2018-05-09 16:59:29 +01:00
Nick Craig-Wood
e42edc8e8c copy, move: Copy single files directly, don't use --files-from work-around
Before this change rclone would inefficiently and confusingly read all
the files in the source directory when copy or moving a single file.
This caused confusion for the users to see log messages about files
which weren't part of the sync.

After the change the copy and move commands use the new infrastructure
made for the copyto and moveto command for single file copy and move.
2018-05-07 20:39:52 +01:00
Nick Craig-Wood
291954baba cmd: make names of argument parsing functions more consistent 2018-05-07 20:39:52 +01:00
Nick Craig-Wood
9d8d7ae1f0 mount,cmount: make --noappledouble --noapplexattr and change defaults #2287
Before this change we would unconditionally set the OSXFUSE options
noappledouble and noapplexattr.

However the noapplexattr options caused problems with copies in the
Finder.

Now the default for noapplexattr is false so we don't add the option
by default and the user can override the defaults using the
--noappledouble and --noapplexattr flags.
2018-05-07 20:37:09 +01:00
Nick Craig-Wood
6ce32e4661 mount,cmount: Add --volname flag and remove special chars from it #2287
Before this change rclone would set the volume name from the
remote:path normally.  However this has `:` and `/` in which make it
difficult to use in macOS.

Now rclone will remove the special characters and replace them with
spaces.  It also allows the volume name to be set with the --volname
flag.
2018-05-07 20:37:09 +01:00
Nick Craig-Wood
1755ffd1f3 mount: make Get/List/Set/Remove xattr return ENOSYS #2287
By default bazil fuse will return ENOTSUPP for these.  However if we
return ENOSYS then OSXFUSE (at least) will never call them again
saving round trips through fuse.
2018-05-07 20:37:09 +01:00
Nick Craig-Wood
aa5c5ec5d3 build: mask linter errors we can't fix 2018-05-05 17:32:41 +01:00
Nick Craig-Wood
e80ae4e09c build: remove unused struct fields spotted by structcheck 2018-05-05 17:32:41 +01:00
Nick Craig-Wood
1320e84bc2 build: remove unused code spotted by the deadcode linter 2018-05-05 17:32:41 +01:00
Nick Craig-Wood
cb5bd47e61 build: fix errors spotted by ineffassign linter
These were mostly caused by shadowing err and a good fraction of them
will have caused errors not to be propagated properly.
2018-05-05 17:32:41 +01:00
Nick Craig-Wood
790a8a9aed build: add gometalinter and gometalinter_install Makefile targets 2018-05-05 17:32:41 +01:00
Nick Craig-Wood
f1a43eca4d mount: make --daemon work for macOS without CGO 2018-05-05 16:23:47 +01:00
Nick Craig-Wood
7ea68f1fc6 sftp: require go1.9+ after golang.org/x/crypto/ssh update 2018-05-05 16:23:47 +01:00
Nick Craig-Wood
6427029c4e vendor: update all dependencies
* Update all dependencies
  * Remove all `[[constraint]]` from Gopkg.toml
  * Add in the minimum number of `[[override]]` to build
  * Remove go get of github.com/inconshreveable/mousetrap as it is vendored
  * Update docs with new policy on constraints
2018-05-05 15:52:24 +01:00
Nick Craig-Wood
21383877df cmd: make exit code 8 for --max-transfer exceeded 2018-05-05 12:58:28 +01:00
Nick Craig-Wood
f95835d613 fserrors: Look deeper into errors for Fatal/Retry/NoRetry errors.
Before this change fatal errors which were wrapped in a system error (eg a
URLError) were not recognised as fatal errors.
2018-05-05 12:58:28 +01:00
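The idea can be sketched as below; the Fataler interface and fatalError type are assumptions for the example rather than the real fserrors API:

```go
package main

import (
	"fmt"
	"net/url"
)

// Fataler is what a fatal error is assumed to look like for this sketch.
type Fataler interface {
	Fatal() bool
}

// fatalError is a stand-in for an error which should abort the sync.
type fatalError struct{ msg string }

func (e fatalError) Error() string { return e.msg }
func (e fatalError) Fatal() bool   { return true }

// isFatal looks deeper into wrapping errors such as *url.Error so a
// fatal error hidden inside a system error is still recognised.
func isFatal(err error) bool {
	for err != nil {
		if f, ok := err.(Fataler); ok {
			return f.Fatal()
		}
		if u, ok := err.(*url.Error); ok {
			err = u.Err // unwrap and keep looking
			continue
		}
		return false
	}
	return false
}

func main() {
	wrapped := &url.Error{Op: "Get", URL: "https://example.com", Err: fatalError{msg: "quota exceeded"}}
	fmt.Println(isFatal(wrapped)) // true - the fatal error is found inside the URLError
}
```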
Nick Craig-Wood
be79b47a7a sync: log when we abandon the sync due to a fatal error 2018-05-05 12:58:28 +01:00
Nick Craig-Wood
be22735609 fs/accounting: fix deadlock on GetBytes
A deadlock could occur since we have now put a mutex on GetBytes:
StatsInfo.String (s.mu) calls progress (acc.statmu), while read
(acc.statmu) calls GetBytes (s.mu), so the two locks could be taken in
opposite orders.

Fix this by giving stringSet its own locking and excluding the call
which caused the deadlock from the mutex in StatsInfo.String.
2018-05-05 12:58:28 +01:00
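A sketch of the shape of the fix - a string set that carries its own lock, so reading it for display never needs the StatsInfo lock (the names here are illustrative, not the real accounting code):

```go
package main

import (
	"fmt"
	"sync"
)

// stringSet holds the names of in-progress transfers and has its own
// mutex, independent of any other stats locking.
type stringSet struct {
	mu    sync.RWMutex
	items map[string]struct{}
}

func newStringSet() *stringSet {
	return &stringSet{items: make(map[string]struct{})}
}

// Add inserts a name into the set.
func (s *stringSet) Add(name string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.items[name] = struct{}{}
}

// Strings returns a copy of the members taking only the set's own
// lock, so it can be called while other locks are held without risking
// the lock ordering problem described above.
func (s *stringSet) Strings() []string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := make([]string, 0, len(s.items))
	for name := range s.items {
		out = append(out, name)
	}
	return out
}

func main() {
	s := newStringSet()
	s.Add("file1.txt")
	s.Add("file2.txt")
	fmt.Println(len(s.Strings()))
}
```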
Nick Craig-Wood
1b1b3c13cd sync: add a test for aborting on max upload 2018-05-05 12:58:28 +01:00
Nick Craig-Wood
5c128272fd Implement --max-transfer flag to quit transferring at a limit #1655 2018-05-05 12:58:28 +01:00
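A sketch of the check behind the flag (the names are made up for the example; the real implementation lives in the accounting and sync code):

```go
package main

import (
	"errors"
	"fmt"
)

// ErrorMaxTransferLimitReached is assumed for this sketch - it is the
// fatal error which makes the sync abort and rclone exit with code 8.
var ErrorMaxTransferLimitReached = errors.New("max transfer limit reached as set by --max-transfer")

// checkMaxTransfer returns a fatal error once the transferred byte
// count passes the configured limit (0 means no limit).
func checkMaxTransfer(transferred, maxTransfer int64) error {
	if maxTransfer > 0 && transferred >= maxTransfer {
		return ErrorMaxTransferLimitReached
	}
	return nil
}

func main() {
	fmt.Println(checkMaxTransfer(5<<30, 1<<30)) // 5GB transferred against a 1GB limit
}
```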
Nick Craig-Wood
d178233e74 sync,march: check the cancel context on every channel send and receive
This fixes a deadlock on sync when all the copying channels receive a
Fatal Error.
2018-05-05 12:58:28 +01:00
Fabian Möller
98bf65c43b vfs: fix ChangeNotify for new or changed folders
Fixes #2251
2018-05-05 12:54:03 +01:00
Fabian Möller
3b5e70c8c6 drive: fix ChangeNotify for folders 2018-05-05 12:54:03 +01:00
Fabian Möller
bd3ad1ac3e vfs: add option to read source files in chunks 2018-05-05 12:49:42 +01:00
Fabian Möller
9fdf273614 fs: improve ChunkedReader
- make Close permanent and return errors afterwards
- use RangeSeek from the wrapped reader if present
- add a limit to chunk growth
- correct RangeSeek interface behavior
- add tests
2018-05-05 12:49:42 +01:00
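The chunk growth limit can be sketched like this; the names are assumptions for the example, not the real fs.ChunkedReader fields:

```go
package main

import "fmt"

// nextChunkSize doubles the chunk size on each call until it reaches
// the configured limit, after which it stays at the limit (a limit of
// 0 or less means unlimited growth).
func nextChunkSize(current, limit int64) int64 {
	next := current * 2
	if limit > 0 && next > limit {
		next = limit
	}
	return next
}

func main() {
	size, limit := int64(128*1024), int64(1024*1024)
	for i := 0; i < 5; i++ {
		fmt.Println(size)
		size = nextChunkSize(size, limit)
	}
	// prints 131072 262144 524288 1048576 1048576 - growth is capped at the limit
}
```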
Nick Craig-Wood
fe25cb9c54 drive: fix about (and df on a mount) for team drives - fixes #2288
Before this fix team drives would return the drive quota which is
incorrect and misleading.

Team drives don't appear to have an API for reading the bytes used or
the quota so we now return that the quota and usage are unknown.
2018-05-03 08:59:14 +01:00
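A sketch of how "unknown" can be reported; the Usage struct here only mirrors the shape of the real fs.Usage type (pointer fields where nil means unknown) and is not the actual definition:

```go
package main

import "fmt"

// Usage reports space used on a remote.  Pointer fields allow nil to
// mean "unknown" rather than zero.
type Usage struct {
	Total *int64 // total bytes available, nil if unknown
	Used  *int64 // bytes used, nil if unknown
	Free  *int64 // bytes free, nil if unknown
}

// teamDriveAbout leaves every field nil because team drives expose no
// quota API - better than reporting the personal drive quota.
func teamDriveAbout() *Usage {
	return &Usage{}
}

func main() {
	u := teamDriveAbout()
	fmt.Println(u.Total == nil, u.Used == nil, u.Free == nil) // true true true - all unknown
}
```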
Nick Craig-Wood
f2608e2a64 Add NoLooseEnds to contributors 2018-05-01 09:43:18 +01:00
NoLooseEnds
a5f1811892 cmd: Fixed a typo – minimum 2018-05-01 09:42:21 +01:00
Nick Craig-Wood
50dc5fe92e Add Rodrigo to contributors 2018-04-30 17:37:43 +01:00
Rodrigo
b7d2048032 WebDAV: Ignore Reason-Phrase in status line #2281 2018-04-30 17:36:38 +01:00
Nick Craig-Wood
3116249692 make sign_upload: only sign the v1.xx releases not the current ones 2018-04-30 17:29:50 +01:00
Nick Craig-Wood
d049e5c680 make build_dep: make sure we update the whole command for nfpm 2018-04-30 17:29:50 +01:00
Nick Craig-Wood
1c9572aba1 Add Piotr Oleszczyk to contributors 2018-04-30 17:29:50 +01:00
Piotr Oleszczyk
76f2cbeb94 sftp: Add --ssh-path-override flag #1474
The flag allows calculation of checksums on systems using
different paths for SSH and SFTP, like Synology NAS boxes.
2018-04-30 17:05:10 +01:00
Nick Craig-Wood
0479c7dcf5 add github-release to make release_dep 2018-04-28 12:38:30 +01:00
Nick Craig-Wood
55674c0bfc Start v1.41-DEV development 2018-04-28 12:37:55 +01:00
9816 changed files with 200120 additions and 6581754 deletions


@@ -4,6 +4,9 @@ os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ncw\rclone
cache:
- '%LocalAppData%\go-build'
environment:
GOPATH: C:\gopath
CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
@@ -43,4 +46,4 @@ artifacts:
- path: build/*-v*.zip
deploy_script:
- IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
- IF "%APPVEYOR_REPO_NAME%" == "ncw/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload


@@ -1,3 +1,4 @@
---
version: 2
jobs:
@@ -13,10 +14,10 @@ jobs:
- run:
name: Cross-compile rclone
command: |
docker pull billziss/xgo-cgofuse
docker pull rclone/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
--image=billziss/xgo-cgofuse \
--image=rclone/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
.
@@ -29,6 +30,21 @@ jobs:
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
mkdir build
cp -R rclone-* build/
- run:
name: Build rclone
command: |
go version
go build
- run:
name: Upload artifacts
command: |
if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
make circleci_upload
fi
- store_artifacts:
path: /tmp/rclone.dist

.github/ISSUE_TEMPLATE.md vendored Normal file

@@ -0,0 +1,31 @@
<!--
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
https://forum.rclone.org/
instead of filing an issue for a quick response.
If you are reporting a bug or asking for a new feature then please use one of the templates here:
https://github.com/ncw/rclone/issues/new
otherwise fill in the form below.
Thank you
The Rclone Developers
-->
#### Output of `rclone version`
#### Describe the issue

.github/ISSUE_TEMPLATE/Bug.md vendored Normal file

@@ -0,0 +1,50 @@
---
name: Bug report
about: Report a problem with rclone
---
<!--
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
https://forum.rclone.org/
instead of filing an issue for a quick response.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
Thank you
The Rclone Developers
-->
#### What is the problem you are having with rclone?
#### What is your rclone version (output from `rclone version`)
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which cloud storage system are you using? (eg Google Drive)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)

.github/ISSUE_TEMPLATE/Feature.md vendored Normal file

@@ -0,0 +1,36 @@
---
name: Feature request
about: Suggest a new feature or enhancement for rclone
---
<!--
Welcome :-)
So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
Here is a checklist of things to do:
1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)
Looking forward to your great idea!
The Rclone Developers
-->
#### What is your current rclone version (output from `rclone version`)?
#### What problem are you trying to solve?
#### How do you think rclone should be changed to solve that?

.github/PULL_REQUEST_TEMPLATE.md vendored Normal file

@@ -0,0 +1,29 @@
<!--
Thank you very much for contributing code or documentation to rclone! Please
fill out the following questions to make it easier for us to review your
changes.
You do not need to check all the boxes below all at once, feel free to take
your time and add more commits. If you're done and ready for review, please
check the last box.
-->
#### What is the purpose of this change?
<!--
Describe the changes here
-->
#### Was the change discussed in an issue or in the forum before?
<!--
Link issues and relevant forum posts here.
-->
#### Checklist
- [ ] I have read the [contribution guidelines](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#commit-messages).
- [ ] I'm done, this Pull Request is ready for review :-)

.gometalinter.json Normal file

@@ -0,0 +1,14 @@
{
"Enable": [
"deadcode",
"errcheck",
"goimports",
"golint",
"ineffassign",
"structcheck",
"varcheck",
"vet"
],
"EnableGC": true,
"Vendor": true
}


@@ -4,11 +4,12 @@ dist: trusty
os:
- linux
go:
- 1.7.6
- 1.8.7
- 1.9.3
- "1.10.1"
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- tip
go_import_path: github.com/ncw/rclone
before_install:
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
@@ -33,18 +34,25 @@ addons:
- libfuse-dev
- rpm
- pkg-config
cache:
directories:
- $HOME/.cache/go-build
matrix:
allow_failures:
- go: tip
include:
- os: osx
go: "1.10.1"
go: 1.11.x
env: GOTAGS=""
cache:
directories:
- $HOME/Library/Caches/go-build
deploy:
provider: script
script: make travis_beta
skip_cleanup: true
on:
repo: ncw/rclone
all_branches: true
go: "1.10.1"
condition: $TRAVIS_OS_NAME == linux && $TRAVIS_PULL_REQUEST == false
go: 1.11.x
condition: $TRAVIS_PULL_REQUEST == false


@@ -21,19 +21,19 @@ with the [latest beta of rclone](https://beta.rclone.org/):
## Submitting a pull request ##
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via Github.
like to implement then please submit a pull request via GitHub.
If it is a big feature then make an issue first so it can be discussed.
You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.
First in your web browser press the fork button on [rclone's Github
First in your web browser press the fork button on [rclone's GitHub
page](https://github.com/ncw/rclone).
Now in your terminal
go get github.com/ncw/rclone
go get -u github.com/ncw/rclone
cd $GOPATH/src/github.com/ncw/rclone
git remote rename origin upstream
git remote add origin git@github.com:YOURUSER/rclone.git
@@ -64,22 +64,31 @@ packages which you can install with
Make sure you
* Add documentation for a new feature (see below for where)
* Add unit tests for a new feature
* Add [documentation](#writing-documentation) for a new feature.
* Follow the [commit message guidelines](#commit-messages).
* Add [unit tests](#testing) for a new feature
* squash commits down to one per feature
* rebase to master `git rebase master`
* rebase to master with `git rebase master`
When you are done with that
git push origin my-new-feature
git push origin my-new-feature
Go to the Github website and click [Create pull
Go to the GitHub website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).
Your patch will get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to Github with `--force`.
rebase it to master then push it to GitHub with `--force`.
## Enabling CI for your fork ##
The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.
rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
websites, find your fork of rclone, and enable building there.
## Testing ##
@@ -114,6 +123,13 @@ but they can be run against any of the remotes.
cd fs/operations
go test -v -remote TestDrive:
If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the
project root:
go install github.com/ncw/rclone/fstest/test_all
test_all -backend drive
If you want to run all the integration tests against all the remotes,
then change into the project root and run
@@ -166,17 +182,21 @@ with modules beneath.
* pacer - retries with backoff and paces operations
* readers - a selection of useful io.Readers
* rest - a thin abstraction over net/http for REST
* vendor - 3rd party code managed by the dep tool
* vendor - 3rd party code managed by `go mod`
* vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation ##
If you are adding a new feature then please update the documentation.
If you add a new flag, then if it is a general flag, document it in
If you add a new general flag (not for a backend), then document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order. If it is a remote specific flag, then document it
in `docs/content/remote.md`.
alphabetical order.
If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field. The first line of this is used
for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.
The only documentation you need to edit are the `docs/content/*.md`
files. The MANUAL.*, rclone.1, web site etc are all auto generated
@@ -195,14 +215,20 @@ file.
## Commit messages ##
Please make the first line of your commit message a summary of the
change, and prefix it with the directory of the change followed by a
colon. The changelog gets made by looking at just these first lines
so make it good!
change that a user (not a developer) of rclone would like to read, and
prefix it with the directory of the change followed by a colon. The
changelog gets made by looking at just these first lines so make it
good!
If you have more to say about the commit, then enter a blank line and
carry on the description. Remember to say why the change was needed -
the commit itself shows what was changed.
Writing more is better than less. Comparing the behaviour before the
change to that after the change is very useful. Imagine you are
writing to yourself in 12 months time when you've forgotten everything
about what you just did and you need to get up to speed quickly.
If the change fixes an issue then write `Fixes #1234` in the commit
message. This can be on the subject line if it will fit. If you
don't want to close the associated issue just put `#1234` and the
@@ -229,37 +255,53 @@ Fixes #1498
## Adding a dependency ##
rclone uses the [dep](https://github.com/golang/dep) tool to manage
its dependencies. All code that rclone needs for building is stored
in the `vendor` directory for perfectly reproducable builds.
rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
support in go1.11 and later to manage its dependencies.
The `vendor` directory is entirely managed by the `dep` tool.
**NB** you must be using go1.11 or above to add a dependency to
rclone. Rclone will still build with older versions of go, but we use
the `go mod` command for dependencies which is only in go1.11 and
above.
To add a new dependency
rclone can be built with modules outside of the GOPATH, but for
backwards compatibility with older go versions, rclone also maintains
a `vendor` directory with all the external code rclone needs for
building.
dep ensure -add github.com/pkg/errors
The `vendor` directory is entirely managed by the `go mod` tool, do
not add things manually.
You can add constraints on that package (see the `dep` documentation),
but don't unless you really need to.
To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency, add it to
`go.mod` and `go.sum` and vendor it for older go versions.
Please check in the changes generated by dep including the `vendor`
directory and `Godep.toml` and `Godep.locl` in a single commit
separate from any other code changes. Watch out for new files in
`vendor`.
GO111MODULE=on go get github.com/ncw/new_dependency
GO111MODULE=on go mod vendor
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including the
`vendor` directory and `go.mod` and `go.sum` in a single commit
separate from any other code changes with the title "vendor: add
github.com/ncw/new_dependency". Remember to `git add` any new files
in `vendor`.
## Updating a dependency ##
If you need to update a dependency then run
dep ensure -update github.com/pkg/errors
GO111MODULE=on go get -u github.com/pkg/errors
GO111MODULE=on go mod vendor
Check in in a single commit as above.
## Updating all the dependencies ##
In order to update all the dependencies then run `make update`. This
just runs `dep ensure -update`. Check in the changes in a single
commit as above.
just uses the go modules to update all the modules to their latest
stable release. Check in the changes in a single commit as above.
This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.
@@ -308,7 +350,13 @@ Unit tests
Integration tests
* Add your fs to `fstest/test_all/test_all.go`
* Add your backend to `fstest/test_all/config.yaml`
* Once you've done that then you can use the integration test framework from the project root:
* go install ./...
* test_all -backend remote
Or if you want to run the integration tests manually:
* Make sure integration tests pass with
* `cd fs/operations`
* `go test -v -remote TestRemote:`
@@ -323,11 +371,10 @@ See the [testing](#testing) section for more information on integration tests.
Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
* `README.md` - main Github page
* `docs/content/remote.md` - main docs page
* `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
* `cmd/cmd.go` - the main help for rclone

Gopkg.lock generated

@@ -1,486 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
name = "bazil.org/fuse"
packages = [
".",
"fs",
"fuseutil"
]
revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "20d4028b8a750c2aca76bf9fefa8ed2d0109b573"
version = "v0.19.0"
[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = [
"storage",
"version"
]
revision = "e67cd39e942c417ae5e9ae1165f778d9fe8996e0"
version = "v14.5.0"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = [
"autorest",
"autorest/adal",
"autorest/azure",
"autorest/date"
]
revision = "0ae36a9e544696de46fdadb7b0d5fb38af48c063"
version = "v10.2.0"
[[projects]]
branch = "master"
name = "github.com/Unknwon/goconfig"
packages = ["."]
revision = "ef1e4c783f8f0478bd8bff0edb3dd0bade552599"
[[projects]]
branch = "master"
name = "github.com/VividCortex/ewma"
packages = ["."]
revision = "43880d236f695d39c62cf7aa4ebd4508c258e6c0"
[[projects]]
branch = "master"
name = "github.com/a8m/tree"
packages = ["."]
revision = "cf42b1e486f0b025942a768a9ad59c9939d6ca40"
[[projects]]
name = "github.com/abbot/go-http-auth"
packages = ["."]
revision = "0ddd408d5d60ea76e320503cc7dd091992dee608"
version = "v0.4.0"
[[projects]]
branch = "master"
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
"aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/sdkio",
"internal/sdkrand",
"internal/shareddefaults",
"private/protocol",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/restxml",
"private/protocol/xml/xmlutil",
"service/s3",
"service/s3/s3iface",
"service/s3/s3manager",
"service/sts"
]
revision = "12fe7d35f8ad5f7f2715d414624b0723737de1f7"
[[projects]]
name = "github.com/billziss-gh/cgofuse"
packages = ["fuse"]
revision = "487e2baa5611bab252a906d7f9b869f944607305"
version = "v1.0.4"
[[projects]]
branch = "master"
name = "github.com/coreos/bbolt"
packages = ["."]
revision = "af9db2027c98c61ecd8e17caa5bd265792b9b9a2"
[[projects]]
name = "github.com/cpuguy83/go-md2man"
packages = ["md2man"]
revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
version = "v1.0.8"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
name = "github.com/djherbis/times"
packages = ["."]
revision = "95292e44976d1217cf3611dc7c8d9466877d3ed5"
version = "v1.0.1"
[[projects]]
branch = "master"
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
packages = [
"dropbox",
"dropbox/async",
"dropbox/common",
"dropbox/file_properties",
"dropbox/files",
"dropbox/sharing",
"dropbox/team_common",
"dropbox/team_policies",
"dropbox/users",
"dropbox/users_common"
]
revision = "f0b3f3ded6d415a94e83e9a514fb8025e4e6be31"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "6333e38ac20b8949a8dd68baa3650f4dee8f39f0"
version = "v1.33.0"
[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/google/go-querystring"
packages = ["query"]
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
branch = "master"
name = "github.com/jlaffaye/ftp"
packages = ["."]
revision = "427467931c6fbc25acba4537c9d3cbc40cfa569b"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "0b12d6b5"
[[projects]]
branch = "master"
name = "github.com/kardianos/osext"
packages = ["."]
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
[[projects]]
branch = "master"
name = "github.com/kr/fs"
packages = ["."]
revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
[[projects]]
name = "github.com/marstr/guid"
packages = ["."]
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
version = "v1.1.0"
[[projects]]
name = "github.com/mattn/go-runewidth"
packages = ["."]
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"
[[projects]]
branch = "master"
name = "github.com/ncw/go-acd"
packages = ["."]
revision = "887eb06ab6a255fbf5744b5812788e884078620a"
[[projects]]
branch = "master"
name = "github.com/ncw/swift"
packages = ["."]
revision = "ae9f0ea1605b9aa6434ed5c731ca35d83ba67c55"
[[projects]]
branch = "master"
name = "github.com/nsf/termbox-go"
packages = ["."]
revision = "e2050e41c8847748ec5288741c0b19a8cb26d084"
[[projects]]
branch = "master"
name = "github.com/okzk/sdnotify"
packages = ["."]
revision = "ed8ca104421a21947710335006107540e3ecb335"
[[projects]]
name = "github.com/patrickmn/go-cache"
packages = ["."]
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
version = "v2.1.0"
[[projects]]
name = "github.com/pengsrc/go-shared"
packages = [
"buffer",
"check",
"convert",
"log",
"reopen"
]
revision = "b98065a377794d577e2a0e32869378b9ce4b8952"
version = "v0.1.1"
[[projects]]
branch = "master"
name = "github.com/pkg/errors"
packages = ["."]
revision = "816c9085562cd7ee03e7f8188a1cfd942858cded"
[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "43ec6c679d353f6e077d3965dc74f6d996eb4a09"
version = "1.5.1"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/rfjakob/eme"
packages = ["."]
revision = "2222dbd4ba467ab3fc7e8af41562fcfe69c0d770"
[[projects]]
name = "github.com/russross/blackfriday"
packages = ["."]
revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
version = "v1.5.1"
[[projects]]
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"
[[projects]]
branch = "master"
name = "github.com/sevlyar/go-daemon"
packages = ["."]
revision = "32749a731f76154d29bc6a547e6585f320eb235e"
[[projects]]
branch = "master"
name = "github.com/skratchdot/open-golang"
packages = ["open"]
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
[[projects]]
branch = "master"
name = "github.com/spf13/cobra"
packages = [
".",
"doc"
]
revision = "c439c4fa093711d42e1b01acb1235b52004753c1"
[[projects]]
branch = "master"
name = "github.com/spf13/pflag"
packages = ["."]
revision = "ee5fd03fd6acfd43e44aea0b4135958546ed8e73"
[[projects]]
branch = "master"
name = "github.com/stretchr/testify"
packages = [
"assert",
"require"
]
revision = "380174f817a09abe5982a82f94ad50938a8df65d"
[[projects]]
branch = "master"
name = "github.com/t3rm1n4l/go-mega"
packages = ["."]
revision = "4e68b16e97ffc3b77abacbf727817a4d48fb0b66"
[[projects]]
branch = "master"
name = "github.com/xanzy/ssh-agent"
packages = ["."]
revision = "ba9c9e33906f58169366275e3450db66139a31a9"
[[projects]]
branch = "master"
name = "github.com/yunify/qingstor-sdk-go"
packages = [
".",
"config",
"logger",
"request",
"request/builder",
"request/data",
"request/errors",
"request/signer",
"request/unpacker",
"service",
"utils"
]
revision = "a3cbaaf92247eaf55751a7ff37c126c511757492"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"internal/chacha20",
"nacl/secretbox",
"pbkdf2",
"poly1305",
"salsa20/salsa",
"scrypt",
"ssh",
"ssh/agent",
"ssh/terminal"
]
revision = "c3a3ad6d03f7a915c0f7e194b7152974bb73d287"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"html",
"html/atom",
"http2",
"http2/hpack",
"idna",
"lex/httplex",
"webdav",
"webdav/internal/xml",
"websocket"
]
revision = "92b859f39abd2d91a854c9f9c4621b2f5054a92d"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
"jwt"
]
revision = "fdc9e635145ae97e6c2cb777c48305600cf515cb"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"unix",
"windows"
]
revision = "d8e400bc7db4870d786864138af681469693d18c"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/language",
"internal/language/compact",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
revision = "8c34f848e18c4bd34d02db7f19a0ed1a0a8f5852"
[[projects]]
branch = "master"
name = "golang.org/x/time"
packages = ["rate"]
revision = "26559e0f760e39c24d730d3224364aef164ee23f"
[[projects]]
branch = "master"
name = "google.golang.org/api"
packages = [
"drive/v3",
"gensupport",
"googleapi",
"googleapi/internal/uritemplates",
"storage/v1"
]
revision = "55e9fb4044f4757138d4273ace23060d022d18f9"
[[projects]]
name = "google.golang.org/appengine"
packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/urlfetch",
"log",
"urlfetch"
]
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5"
version = "v2.1.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "09939c0d5f32998497c8304c84dd5c397c88816d235441d87f5306ae5db43b8a"
solver-name = "gps-cdcl"
solver-version = 1


@@ -1,154 +0,0 @@
## Gopkg.toml example (these lines may be deleted)
## "required" lists a set of packages (not projects) that must be included in
## Gopkg.lock. This list is merged with the set of packages imported by the current
## project. Use it when your project needs a package it doesn't explicitly import -
## including "main" packages.
# required = ["github.com/user/thing/cmd/thing"]
## "ignored" lists a set of packages (not projects) that are ignored when
## dep statically analyzes source code. Ignored packages can be in this project,
## or in a dependency.
# ignored = ["github.com/user/project/badpkg"]
## Dependencies define constraints on dependent projects. They are respected by
## dep whether coming from the Gopkg.toml of the current project or a dependency.
# [[constraint]]
## Required: the root import path of the project being constrained.
# name = "github.com/user/project"
#
## Recommended: the version constraint to enforce for the project.
## Only one of "branch", "version" or "revision" can be specified.
# version = "1.0.0"
# branch = "master"
# revision = "abc123"
#
## Optional: an alternate location (URL or import path) for the project's source.
# source = "https://github.com/myfork/package.git"
## Overrides have the same structure as [[constraint]], but supercede all
## [[constraint]] declarations from all projects. Only the current project's
## [[override]] are applied.
##
## Overrides are a sledgehammer. Use them only as a last resort.
# [[override]]
## Required: the root import path of the project being constrained.
# name = "github.com/user/project"
#
## Optional: specifying a version constraint override will cause all other
## constraints on this project to be ignored; only the overriden constraint
## need be satisfied.
## Again, only one of "branch", "version" or "revision" can be specified.
# version = "1.0.0"
# branch = "master"
# revision = "abc123"
#
## Optional: specifying an alternate source location as an override will
## enforce that the alternate location is used for that project, regardless of
## what source location any dependent projects specify.
# source = "https://github.com/myfork/package.git"
[[constraint]]
branch = "master"
name = "bazil.org/fuse"
[[constraint]]
branch = "master"
name = "github.com/Unknwon/goconfig"
[[constraint]]
branch = "master"
name = "github.com/VividCortex/ewma"
[[constraint]]
branch = "master"
name = "github.com/aws/aws-sdk-go"
[[constraint]]
branch = "master"
name = "github.com/ncw/go-acd"
[[constraint]]
branch = "master"
name = "github.com/ncw/swift"
[[constraint]]
branch = "master"
name = "github.com/pkg/errors"
[[constraint]]
branch = "master"
name = "github.com/pkg/sftp"
[[constraint]]
branch = "master"
name = "github.com/rfjakob/eme"
[[constraint]]
branch = "master"
name = "github.com/skratchdot/open-golang"
[[constraint]]
branch = "master"
name = "github.com/spf13/cobra"
[[constraint]]
branch = "master"
name = "github.com/spf13/pflag"
[[constraint]]
branch = "master"
name = "github.com/stretchr/testify"
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[[constraint]]
branch = "master"
name = "golang.org/x/oauth2"
[[constraint]]
branch = "master"
name = "golang.org/x/sys"
[[constraint]]
branch = "master"
name = "golang.org/x/text"
[[constraint]]
branch = "master"
name = "google.golang.org/api"
[[constraint]]
branch = "master"
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
[[constraint]]
branch = "master"
name = "github.com/yunify/qingstor-sdk-go"
[[constraint]]
branch = "master"
name = "github.com/coreos/bbolt"
[[constraint]]
branch = "master"
name = "github.com/patrickmn/go-cache"
[[constraint]]
branch = "master"
name = "github.com/okzk/sdnotify"
[[constraint]]
branch = "master"
name = "github.com/sevlyar/go-daemon"
[[constraint]]
branch = "master"
name = "github.com/t3rm1n4l/go-mega"


@@ -1,43 +0,0 @@
<!--
Hi!
We understand you are having a problem with rclone or have an idea for an improvement - we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum
https://forum.rclone.org/
instead of filing an issue. We'll reply quickly and it won't increase our massive issue backlog.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
If you have an idea for an improvement, then please search the old issues first and if you don't find your idea, make a new issue.
Thanks
The Rclone Developers
-->
#### What is the problem you are having with rclone?
#### What is your rclone version (eg output from `rclone -V`)
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which cloud storage system are you using? (eg Google Drive)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)


@@ -1,12 +1,17 @@
# Maintainers guide for rclone #
Current active maintainers of rclone are
Current active maintainers of rclone are:
* Nick Craig-Wood @ncw
* Stefan Breunig @breunigs
* Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer
* Fabian Möller @B4dM4n
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
**This is a work in progress Draft**
@@ -56,7 +61,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po
Try to process pull requests promptly!
Merging pull requests on Github itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.

File diff suppressed because it is too large

MANUAL.md

File diff suppressed because it is too large

MANUAL.txt

File diff suppressed because it is too large

Makefile

@@ -1,12 +1,28 @@
SHELL = bash
TAG := $(shell echo $$(git describe --abbrev=8 --tags)-$${APPVEYOR_REPO_BRANCH:-$${TRAVIS_BRANCH:-$$(git rev-parse --abbrev-ref HEAD)}} | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/; s/-\(HEAD\|master\)$$//')
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
LAST_TAG := $(shell git describe --tags --abbrev=0)
ifeq ($(BRANCH),$(LAST_TAG))
BRANCH := master
endif
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
TAG_BRANCH :=
BRANCH_PATH :=
endif
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
ifneq ($(TAG),$(LAST_TAG))
TAG := $(TAG)-beta
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
# Run full tests if go >= go1.9
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 9)')
BETA_URL := https://beta.rclone.org/$(TAG)/
# Run full tests if go >= go1.11
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 11)')
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
@@ -21,6 +37,7 @@ rclone:
vars:
@echo SHELL="'$(SHELL)'"
@echo BRANCH="'$(BRANCH)'"
@echo TAG="'$(TAG)'"
@echo LAST_TAG="'$(LAST_TAG)'"
@echo NEW_TAG="'$(NEW_TAG)'"
@@ -33,10 +50,9 @@ version:
# Full suite of integration tests
test: rclone
go install github.com/ncw/rclone/fstest/test_all
-go test -v -count 1 $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
@echo "Written logs in test.log and fs/test_all.log"
go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
-test_all 2>&1 | tee test_all.log
@echo "Written logs in test_all.log"
# Quick test
quicktest:
@@ -51,36 +67,46 @@ ifdef FULL_TESTS
go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
errcheck $(BUILDTAGS) ./...
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl|ApplicationCredentialId)' ; test $$? -eq 1
else
@echo Skipping source quality tests as version of go too old
endif
gometalinter_install:
go get -u github.com/alecthomas/gometalinter
gometalinter --install --update
# We aren't using gometalinter as the default linter yet because
# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
# 2. can't get -printfuncs working with the vet linter
gometalinter:
gometalinter ./...
# Get the build dependencies
build_dep:
ifdef FULL_TESTS
go get -u github.com/kisielk/errcheck
go get -u golang.org/x/tools/cmd/goimports
go get -u github.com/golang/lint/golint
go get -u github.com/inconshreveable/mousetrap
go get -u github.com/tools/godep
go get -u golang.org/x/lint/golint
endif
# Get the release dependencies
release_dep:
go get -u github.com/goreleaser/nfpm
go get -u github.com/goreleaser/nfpm/...
go get -u github.com/aktau/github-release
# Update dependencies
update:
go get -u github.com/golang/dep/cmd/dep
dep ensure -update -v
GO111MODULE=on go get -u ./...
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
doc: rclone.1 MANUAL.html MANUAL.txt
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
rclone.1: MANUAL.md
pandoc -s --from markdown --to man MANUAL.md -o rclone.1
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
./bin/make_manual.py
MANUAL.html: MANUAL.md
@@ -90,7 +116,10 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
rclone gendocs docs/content/commands/
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/
backenddocs: rclone bin/make_backend_docs.py
./bin/make_backend_docs.py
rcdocs: rclone
bin/make_rc_docs.sh
@@ -115,9 +144,9 @@ tarball:
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
sign_upload:
cd build && md5sum rclone-* | gpg --clearsign > MD5SUMS
cd build && sha1sum rclone-* | gpg --clearsign > SHA1SUMS
cd build && sha256sum rclone-* | gpg --clearsign > SHA256SUMS
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
cd build && sha256sum rclone-v* | gpg --clearsign > SHA256SUMS
check_sign:
cd build && gpg --verify MD5SUMS && gpg --decrypt MD5SUMS | md5sum -c
@@ -125,8 +154,8 @@ check_sign:
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
upload:
rclone -v copy --exclude '*current*' build/ memstore:downloads-rclone-org/$(TAG)
rclone -v copy --include '*current*' --include version.txt build/ memstore:downloads-rclone-org
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
upload_github:
./bin/upload-github $(TAG)
@@ -135,43 +164,54 @@ cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)β
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β
@echo Beta release ready at https://pub.rclone.org/$(TAG)%CE%B2/
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
log_since_last_release:
git log $(LAST_TAG)..
compile_all:
ifdef FULL_TESTS
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)β
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
else
@echo Skipping compile all as version of go too old
endif
appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
ifeq ($(APPVEYOR_REPO_BRANCH),master)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ memstore:beta-rclone-org
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
circleci_upload:
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo
endif
travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt -exclude "^windows/" -parallel 8 $(BUILDTAGS) $(TAG)β
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
ifeq ($(TRAVIS_BRANCH),master)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ memstore:beta-rclone-org
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
# Fetch the windows builds from appveyor
fetch_windows:
rclone -v copy --include 'rclone-v*-windows-*.zip' memstore:beta-rclone-org/$(TAG) build/
-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
md5sum build/rclone-*-windows-*.zip | sort
# Fetch the binary builds from travis and appveyor
fetch_binaries:
rclone -P sync $(BETA_UPLOAD) build/
serve: website
cd docs && hugo server -v -w
@@ -182,10 +222,10 @@ tag: doc
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
mv docs/content/changelog.md.new docs/content/changelog.md
@echo "Edit the new changelog in docs/content/changelog.md"
@echo " * $(NEW_TAG) -" `date -I` >> docs/content/changelog.md
@git log $(LAST_TAG)..$(NEW_TAG) --oneline >> docs/content/changelog.md
@echo "Then commit the changes"
@echo "Then commit all the changes"
@echo git commit -m \"Version $(NEW_TAG)\" -a -v
@echo "And finally run make retag before make cross etc"
@@ -198,4 +238,3 @@ startdev:
winzip:
zip -9 rclone-$(TAG).zip rclone.exe

README.md

@@ -2,10 +2,11 @@
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
[Forum](https://forum.rclone.org/) |
[G+](https://google.com/+RcloneOrg)
[![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone)
@@ -13,48 +14,83 @@
[![CircleCI](https://circleci.com/gh/ncw/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/ncw/rclone/tree/master)
[![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
Rclone is a command line program to sync files and directories to and from
# Rclone
* Amazon Drive
* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
* Backblaze B2
* Box
* Dropbox
* FTP
* Google Cloud Storage
* Google Drive
* HTTP
* Hubic
* Mega
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
* pCloud
* QingStor
* SFTP
* Webdav / Owncloud / Nextcloud
* Yandex Disk
* The local filesystem
Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
Features
## Storage providers
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* Openstack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
## Features
* MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* Copy mode to just copy new/changed files
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
* Optional encryption (Crypt)
* Optional FUSE mount
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
## Installation & documentation
* https://rclone.org/
Please see the [rclone website](https://rclone.org/) for:
* [Installation](https://rclone.org/install/)
* [Documentation & configuration](https://rclone.org/docs/)
* [Changelog](https://rclone.org/changelog/)
* [FAQ](https://rclone.org/faq/)
* [Storage providers](https://rclone.org/overview/)
* [Forum](https://forum.rclone.org/)
* ...and more
## Downloads
* https://rclone.org/downloads/
License
-------
This is free software under the terms of MIT the license (check the
COPYING file included in this package).
[COPYING file](/COPYING) included in this package).


@@ -13,14 +13,9 @@ Making a release
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX"
* make retag
* make release_dep
* # Set the GOPATH for a current stable go compiler
* make cross
* git checkout docs/content/commands # to undo date changes in commands
* git push --tags origin master
* git push --tags origin master:stable # update the stable branch for packager.io
* # Wait for the appveyor and travis builds to complete then fetch the windows binaries from appveyor
* make fetch_windows
* # Wait for the appveyor and travis builds to complete then...
* make fetch_binaries
* make tarball
* make sign_upload
* make check_sign
@@ -31,10 +26,45 @@ Making a release
* # announce with forum post, twitter post, G+ post
Early in the next release cycle update the vendored dependencies
* Review any pinned packages in go.mod and remove if possible
* make update
* git status
* git add new files
* carry forward any patches to vendor stuff
* git commit -a -v
Make the version number be just in a file?
If `make update` fails with errors like this:
```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```
Can be fixed with
* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
* GO111MODULE=on go mod tidy
* GO111MODULE=on go mod vendor
Making a point release. If rclone needs a point release due to some
horrendous bug, then
* git branch v1.XX v1.XX-fixes
* git cherry-pick any fixes
* Test (see above)
* make NEW_TAG=v1.XX.1 tag
* edit docs/content/changelog.md
* make TAG=v1.43.1 doc
* git commit -a -v -m "Version v1.XX.1"
* git tag -d v1.XX.1
* git tag -s -m "Version v1.XX.1" v1.XX.1
* git push --tags -u origin v1.XX-fixes
* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
* make TAG=v1.43.1 tarball
* make TAG=v1.43.1 sign_upload
* make TAG=v1.43.1 check_sign
* make TAG=v1.43.1 upload
* make TAG=v1.43.1 upload_website
* make TAG=v1.43.1 upload_github
* NB this overwrites the current beta so after the release, rebuild the last travis build
* Announce!


@@ -2,12 +2,12 @@ package alias
import (
"errors"
"path"
"path/filepath"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fspath"
)
// Register with Fs
@@ -17,29 +17,38 @@ func init() {
Description: "Alias for a existing remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Required: true,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string) (fs.Fs, error) {
remote := config.FileGet(name, "remote")
if remote == "" {
return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
}
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
root = filepath.ToSlash(root)
return fsInfo.NewFs(configName, path.Join(fsPath, root))
if opt.Remote == "" {
return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
}
if strings.HasPrefix(opt.Remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
if err != nil {
return nil, err
}
return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
}


@@ -15,8 +15,6 @@ import (
var (
remoteName = "TestAlias"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
)
func prepare(t *testing.T, root string) {


@@ -15,14 +15,17 @@ import (
_ "github.com/ncw/rclone/backend/googlecloudstorage"
_ "github.com/ncw/rclone/backend/http"
_ "github.com/ncw/rclone/backend/hubic"
_ "github.com/ncw/rclone/backend/jottacloud"
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/mega"
_ "github.com/ncw/rclone/backend/onedrive"
_ "github.com/ncw/rclone/backend/opendrive"
_ "github.com/ncw/rclone/backend/pcloud"
_ "github.com/ncw/rclone/backend/qingstor"
_ "github.com/ncw/rclone/backend/s3"
_ "github.com/ncw/rclone/backend/sftp"
_ "github.com/ncw/rclone/backend/swift"
_ "github.com/ncw/rclone/backend/union"
_ "github.com/ncw/rclone/backend/webdav"
_ "github.com/ncw/rclone/backend/yandex"
)


@@ -18,14 +18,14 @@ import (
"log"
"net/http"
"path"
"regexp"
"strings"
"time"
"github.com/ncw/go-acd"
acd "github.com/ncw/go-acd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
@@ -38,20 +38,17 @@ import (
)
const (
folderKind = "FOLDER"
fileKind = "FILE"
assetKind = "ASSET"
statusAvailable = "AVAILABLE"
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
minSleep = 20 * time.Millisecond
warnFileSize = 50000 << 20 // Display warning for files larger than this size
folderKind = "FOLDER"
fileKind = "FILE"
statusAvailable = "AVAILABLE"
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
minSleep = 20 * time.Millisecond
warnFileSize = 50000 << 20 // Display warning for files larger than this size
defaultTempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
)
// Globals
var (
// Flags
tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
uploadWaitPerGB = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
// Description of how to auth for this app
acdConfig = &oauth2.Config{
Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
@@ -69,35 +66,91 @@ var (
func init() {
fs.Register(&fs.RegInfo{
Name: "amazon cloud drive",
Prefix: "acd",
Description: "Amazon Drive",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.Config("amazon cloud drive", name, acdConfig)
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("amazon cloud drive", name, m, acdConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Amazon Application Client Id - required.",
Name: config.ConfigClientID,
Help: "Amazon Application Client ID.",
Required: true,
}, {
Name: config.ConfigClientSecret,
Help: "Amazon Application Client Secret - required.",
Name: config.ConfigClientSecret,
Help: "Amazon Application Client Secret.",
Required: true,
}, {
Name: config.ConfigAuthURL,
Help: "Auth server URL - leave blank to use Amazon's.",
Name: config.ConfigAuthURL,
Help: "Auth server URL.\nLeave blank to use Amazon's.",
Advanced: true,
}, {
Name: config.ConfigTokenURL,
Help: "Token server url - leave blank to use Amazon's.",
Name: config.ConfigTokenURL,
Help: "Token server url.\nleave blank to use Amazon's.",
Advanced: true,
}, {
Name: "checkpoint",
Help: "Checkpoint for internal polling (debug).",
Hide: fs.OptionHideBoth,
Advanced: true,
}, {
Name: "upload_wait_per_gb",
Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while. This
happens sometimes for files over 1GB in size and nearly every time for
files bigger than 10GB. This parameter controls the time rclone waits
for the file to appear.
The default value for this parameter is 3 minutes per GB, so by
default it will wait 3 minutes for every GB uploaded to see if the
file appears.
You can disable this feature by setting it to 0. This may cause
conflict errors as rclone retries the failed upload but the file will
most likely appear correctly eventually.
These values were determined empirically by observing lots of uploads
of big files for a range of file sizes.
Upload with the "-v" flag to see more info about what rclone is doing
in this situation.`,
Default: fs.Duration(180 * time.Second),
Advanced: true,
}, {
Name: "templink_threshold",
Help: `Files >= this size will be downloaded via their tempLink.
Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
of files bigger than about 10GB. The default for this is 9GB which
shouldn't need to be changed.
To download files above this threshold, rclone requests a "tempLink"
which downloads the file through a temporary URL directly from the
underlying S3 storage.`,
Default: defaultTempLinkThreshold,
Advanced: true,
}},
})
flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
}
// Options defines the configuration for this backend
type Options struct {
Checkpoint string `config:"checkpoint"`
UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
}
// Fs represents a remote acd server
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
c *acd.Client // the connection to the acd server
noAuthClient *http.Client // unauthenticated http client
root string // the path we are working on
@@ -138,9 +191,6 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// Pattern to match a acd path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses an acd 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
@@ -196,7 +246,13 @@ func filterRequest(req *http.Request) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
root = parsePath(root)
baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
@@ -206,15 +262,16 @@ func NewFs(name, root string) (fs.Fs, error) {
} else {
fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, acdConfig, baseClient)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
if err != nil {
log.Fatalf("Failed to configure Amazon Drive: %v", err)
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
}
c := acd.NewClient(oAuthClient)
f := &Fs{
name: name,
root: root,
opt: *opt,
c: c,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
noAuthClient: fshttp.NewClient(fs.Config),
@@ -255,16 +312,16 @@ func NewFs(name, root string) (fs.Fs, error) {
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
newF := *f
newF.dirCache = dircache.New(newRoot, f.trueRootID, &newF)
newF.root = newRoot
tempF := *f
tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = newF.dirCache.FindRoot(false)
err = tempF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := newF.newObjectWithInfo(remote, nil)
_, err := tempF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -272,8 +329,13 @@ func NewFs(name, root string) (fs.Fs, error) {
}
return nil, err
}
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return &newF, fs.ErrorIsFile
return f, fs.ErrorIsFile
}
return f, nil
}
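
The XXX comment above points at a subtle pitfall: the Features struct was already filled with function values bound to the original *f, so returning a copy would leave those functions reading the old value. A standalone illustration of the effect in plain Go, not rclone code:

package main

import "fmt"

type thing struct {
	root string
	get  func() string // stands in for the function fields in fs.Features
}

func main() {
	orig := &thing{root: "old"}
	orig.get = func() string { return orig.root } // closure bound to orig

	copied := *orig     // like `tempF := *f`
	copied.root = "new" // like tempF.root = newRoot

	fmt.Println(copied.get()) // prints "old" - the closure still reads orig.root
}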
@@ -532,13 +594,13 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
}
// Don't wait for uploads - assume they will appear later
if *uploadWaitPerGB <= 0 {
if f.opt.UploadWaitPerGB <= 0 {
fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
return false, inInfo, inErr
}
// Time we should wait for the upload
uploadWaitPerByte := float64(*uploadWaitPerGB) / 1024 / 1024 / 1024
uploadWaitPerByte := float64(f.opt.UploadWaitPerGB) / 1024 / 1024 / 1024
timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))
const sleepTime = 5 * time.Second // sleep between tries
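
A quick standalone check of the wait-time formula in checkUpload above, using the default of 3 minutes per GB; the 2 GiB size is just an example value:

package main

import (
	"fmt"
	"time"
)

func main() {
	uploadWaitPerGB := 180 * time.Second // the default upload_wait_per_gb
	size := int64(2 << 30)               // an example 2 GiB upload

	uploadWaitPerByte := float64(uploadWaitPerGB) / 1024 / 1024 / 1024
	timeToWait := time.Duration(uploadWaitPerByte * float64(size))

	fmt.Println(timeToWait) // 6m0s - how long rclone waits for the file to appear
}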
@@ -1020,7 +1082,7 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
bigObject := o.Size() >= int64(tempLinkThreshold)
bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
if bigObject {
fs.Debugf(o, "Downloading large object via tempLink")
}
@@ -1212,24 +1274,38 @@ func (o *Object) MimeType() string {
// Automatically restarts itself in case of unexpected behaviour of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
checkpoint := config.FileGet(f.name, "checkpoint")
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
checkpoint := f.opt.Checkpoint
quit := make(chan bool)
go func() {
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
fs.Debugf(f, "Unable to save checkpoint: %v", err)
}
select {
case <-quit:
return
case <-time.After(pollInterval):
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if pollInterval == 0 {
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
} else {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
fs.Debugf(f, "Unable to save checkpoint: %v", err)
}
}
}
}()
return quit
}
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
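
A caller-side sketch of the reworked ChangeNotify above, not rclone code: it assumes a value f satisfying fs.ChangeNotifier (asserted further down) and that the call returns once its internal goroutine is started. Imports needed: fmt, time and github.com/ncw/rclone/fs.

func watch(f fs.ChangeNotifier) {
	pollInterval := make(chan time.Duration)
	f.ChangeNotify(func(path string, entryType fs.EntryType) {
		fmt.Println("changed:", path)
	}, pollInterval)

	pollInterval <- 30 * time.Second // start polling every 30 seconds
	pollInterval <- 0                // pause polling - the internal ticker stops
	close(pollInterval)              // shut the notifier down for good
}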
@@ -1320,6 +1396,14 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
return checkpoint
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
if o.info.Id == nil {
return ""
}
return *o.info.Id
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1331,4 +1415,5 @@ var (
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)

File diff suppressed because it is too large


@@ -0,0 +1,18 @@
// +build !plan9,!solaris,go1.8
package azureblob
import (
"testing"
"github.com/stretchr/testify/assert"
)
func (f *Fs) InternalTest(t *testing.T) {
// Check first feature flags are set on this
// remote
enabled := f.Features().SetTier
assert.True(t, enabled)
enabled = f.Features().GetTier
assert.True(t, enabled)
}


@@ -1,17 +1,37 @@
// Test AzureBlob filesystem interface
package azureblob_test
// +build !plan9,!solaris,go1.8
package azureblob
import (
"testing"
"github.com/ncw/rclone/backend/azureblob"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*azureblob.Object)(nil),
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)


@@ -0,0 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 solaris !go1.8
package azureblob


@@ -31,11 +31,6 @@ func (e *Error) Fatal() bool {
var _ fserrors.Fataler = (*Error)(nil)
// Account describes a B2 account
type Account struct {
ID string `json:"accountId"` // The identifier for the account.
}
// Bucket describes a B2 bucket
type Bucket struct {
ID string `json:"bucketId"`
@@ -74,7 +69,7 @@ const versionFormat = "-v2006-01-02-150405.000"
func (t Timestamp) AddVersion(remote string) string {
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
s := (time.Time)(t).Format(versionFormat)
s := time.Time(t).Format(versionFormat)
// Replace the '.' with a '-'
s = strings.Replace(s, ".", "-", -1)
return base + s + ext
@@ -107,20 +102,20 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {
// IsZero returns true if the timestamp is uninitialised
func (t Timestamp) IsZero() bool {
return (time.Time)(t).IsZero()
return time.Time(t).IsZero()
}
// Equal compares two timestamps
//
// If either are !IsZero then it returns false
func (t Timestamp) Equal(s Timestamp) bool {
if (time.Time)(t).IsZero() {
if time.Time(t).IsZero() {
return false
}
if (time.Time)(s).IsZero() {
if time.Time(s).IsZero() {
return false
}
return (time.Time)(t).Equal((time.Time)(s))
return time.Time(t).Equal(time.Time(s))
}
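
A standalone sketch of what AddVersion produces once the '.' in the fractional seconds is swapped for a '-'; the modification time below is just an example and the import path assumes the api package shown in this diff:

package main

import (
	"fmt"
	"time"

	"github.com/ncw/rclone/backend/b2/api"
)

func main() {
	modTime := time.Date(2019, 2, 1, 17, 9, 57, 123000000, time.UTC) // example time
	fmt.Println(api.Timestamp(modTime).AddVersion("data.txt"))
	// data-v2019-02-01-170957-123.txt
}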
// File is info about a file
@@ -137,10 +132,27 @@ type File struct {
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
}
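
A tiny decode sketch, not rclone code: the JSON below is made-up sample data, but its field names match the struct tags above, so it shows how a bucket-restricted application key surfaces in the new Allowed section:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	const sample = `{
	  "accountId": "ACCOUNT",
	  "authorizationToken": "TOKEN",
	  "apiUrl": "https://api000.backblazeb2.com",
	  "downloadUrl": "https://f000.backblazeb2.com",
	  "recommendedPartSize": 100000000,
	  "allowed": {"bucketId": "BUCKET_ID", "bucketName": "my-bucket",
	              "capabilities": ["listFiles", "readFiles"]}
	}` // made-up example response

	var resp struct {
		Allowed struct {
			BucketID   string `json:"bucketId"`
			BucketName string `json:"bucketName"`
		} `json:"allowed"`
	}
	if err := json.Unmarshal([]byte(sample), &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Allowed.BucketName) // my-bucket - this key only sees one bucket
}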
// ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct {
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId,omitempty"` // When specified, the result will be a list containing just this bucket.
BucketName string `json:"bucketName,omitempty"` // When specified, the result will be a list containing just this bucket.
BucketTypes []string `json:"bucketTypes,omitempty"` // If present, B2 will use it as a filter for bucket types returned in the list buckets response.
}
// ListBucketsResponse is as returned from the b2_list_buckets call


@@ -22,8 +22,8 @@ import (
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
@@ -34,30 +34,27 @@ import (
)
const (
defaultEndpoint = "https://api.backblazeb2.com"
headerPrefix = "x-bz-info-" // lower case as that is what the server returns
timeKey = "src_last_modified_millis"
timeHeader = headerPrefix + timeKey
sha1Key = "large_file_sha1"
sha1Header = "X-Bz-Content-Sha1"
sha1InfoHeader = headerPrefix + sha1Key
testModeHeader = "X-Bz-Test-Mode"
retryAfterHeader = "Retry-After"
minSleep = 10 * time.Millisecond
maxSleep = 5 * time.Minute
decayConstant = 1 // bigger for slower decay, exponential
maxParts = 10000
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
defaultEndpoint = "https://api.backblazeb2.com"
headerPrefix = "x-bz-info-" // lower case as that is what the server returns
timeKey = "src_last_modified_millis"
timeHeader = headerPrefix + timeKey
sha1Key = "large_file_sha1"
sha1Header = "X-Bz-Content-Sha1"
sha1InfoHeader = headerPrefix + sha1Key
testModeHeader = "X-Bz-Test-Mode"
retryAfterHeader = "Retry-After"
minSleep = 10 * time.Millisecond
maxSleep = 5 * time.Minute
decayConstant = 1 // bigger for slower decay, exponential
maxParts = 10000
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
minChunkSize = 5 * fs.MebiByte
defaultChunkSize = 96 * fs.MebiByte
defaultUploadCutoff = 200 * fs.MebiByte
)
// Globals
var (
minChunkSize = fs.SizeSuffix(5E6)
chunkSize = fs.SizeSuffix(96 * 1024 * 1024)
uploadCutoff = fs.SizeSuffix(200E6)
b2TestMode = flags.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
b2Versions = flags.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
b2HardDelete = flags.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
)
@@ -68,29 +65,89 @@ func init() {
Description: "Backblaze B2",
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Account ID",
Name: "account",
Help: "Account ID or Application Key ID",
Required: true,
}, {
Name: "key",
Help: "Application Key",
Name: "key",
Help: "Application Key",
Required: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service - leave blank normally.",
},
},
Name: "endpoint",
Help: "Endpoint for the service.\nLeave blank normally.",
Advanced: true,
}, {
Name: "test_mode",
Help: `A flag string for X-Bz-Test-Mode header for debugging.
This is for debugging purposes only. Setting it to one of the strings
below will cause b2 to return specific errors:
* "fail_some_uploads"
* "expire_some_account_authorization_tokens"
* "force_cap_exceeded"
These will be set in the "X-Bz-Test-Mode" header which is documented
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
Default: "",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "versions",
Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: false,
Advanced: true,
}, {
Name: "hard_delete",
Help: "Permanently delete files on remote removal, otherwise hide files.",
Default: false,
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.
Files above this size will be uploaded in chunks of "--b2-chunk-size".
This value should be set no larger than 4.657GiB (== 5GB).`,
Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size. Must fit in memory.
When uploading large files, chunk the file into this size. Note that
these chunks are buffered in memory and there might be a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
minimum size.`,
Default: fs.SizeSuffix(defaultChunkSize),
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files`,
Default: false,
Advanced: true,
}},
})
flags.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
flags.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
}
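
A back-of-the-envelope sketch of the memory implication spelled out in the chunk_size help text above: up to --transfers chunks are buffered at once. The value 4 for transfers is rclone's usual default and fs.MebiByte is assumed to be 1 << 20; this is illustration, not rclone code.

package main

import "fmt"

const mebiByte = 1 << 20 // assumed to match fs.MebiByte

func main() {
	chunkSize := 96 * mebiByte // the default chunk_size above
	transfers := 4             // assumed default for --transfers

	fmt.Printf("worst-case upload buffers: %d MiB\n", chunkSize*transfers/mebiByte)
	// worst-case upload buffers: 384 MiB
}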
// Options defines the configuration for this backend
type Options struct {
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
TestMode string `config:"test_mode"`
Versions bool `config:"versions"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableCheckSum bool `config:"disable_checksum"`
}
// Fs represents a remote b2 server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
account string // account name
key string // auth key
endpoint string // name of the starting api endpoint
srv *rest.Client // the connection to the b2 server
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
@@ -145,7 +202,7 @@ func (f *Fs) Features() *fs.Features {
}
// Pattern to match a b2 path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// parsePath parses a b2 'url'
func parsePath(path string) (bucket, directory string, err error) {
@@ -231,37 +288,73 @@ func errorHandler(resp *http.Response) error {
return errResponse
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
if uploadCutoff < chunkSize {
return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", uploadCutoff, chunkSize)
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
if chunkSize < minChunkSize {
return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, chunkSize)
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
f.fillBufferTokens() // reset the buffer tokens
}
return
}
func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
if cs < opt.ChunkSize {
return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(&f.opt, cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadCutoff(opt, opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "b2: upload cutoff")
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "b2: chunk size")
}
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
account := config.FileGet(name, "account")
if account == "" {
if opt.Account == "" {
return nil, errors.New("account not found")
}
key := config.FileGet(name, "key")
if key == "" {
if opt.Key == "" {
return nil, errors.New("key not found")
}
endpoint := config.FileGet(name, "endpoint", defaultEndpoint)
if opt.Endpoint == "" {
opt.Endpoint = defaultEndpoint
}
f := &Fs{
name: name,
bucket: bucket,
root: directory,
account: account,
key: key,
endpoint: endpoint,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
bufferTokens: make(chan []byte, fs.Config.Transfers),
name: name,
opt: *opt,
bucket: bucket,
root: directory,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
ReadMimeType: true,
@@ -269,19 +362,28 @@ func NewFs(name, root string) (fs.Fs, error) {
BucketBased: true,
}).Fill(f)
// Set the test flag if required
if *b2TestMode != "" {
testMode := strings.TrimSpace(*b2TestMode)
if opt.TestMode != "" {
testMode := strings.TrimSpace(opt.TestMode)
f.srv.SetHeader(testModeHeader, testMode)
fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
}
// Fill up the buffer tokens
for i := 0; i < fs.Config.Transfers; i++ {
f.bufferTokens <- nil
}
f.fillBufferTokens()
err = f.authorizeAccount()
if err != nil {
return nil, errors.Wrap(err, "failed to authorize account")
}
// If this is a key limited to a single bucket, it must exist already
if f.bucket != "" && f.info.Allowed.BucketID != "" {
allowedBucket := f.info.Allowed.BucketName
if allowedBucket == "" {
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
if allowedBucket != f.bucket {
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
}
f.markBucketOK()
f.setBucketID(f.info.Allowed.BucketID)
}
if f.root != "" {
f.root += "/"
// Check to see if the (bucket,directory) is actually an existing file
@@ -316,9 +418,9 @@ func (f *Fs) authorizeAccount() error {
opts := rest.Opts{
Method: "GET",
Path: "/b2api/v1/b2_authorize_account",
RootURL: f.endpoint,
UserName: f.account,
Password: f.key,
RootURL: f.opt.Endpoint,
UserName: f.opt.Account,
Password: f.opt.Key,
ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
}
err := f.pacer.Call(func() (bool, error) {
@@ -380,11 +482,19 @@ func (f *Fs) clearUploadURL() {
f.uploadMu.Unlock()
}
// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
f.bufferTokens = make(chan []byte, fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
f.bufferTokens <- nil
}
}
// getUploadBlock gets a block from the pool of size chunkSize
func (f *Fs) getUploadBlock() []byte {
buf := <-f.bufferTokens
if buf == nil {
buf = make([]byte, chunkSize)
buf = make([]byte, f.opt.ChunkSize)
}
// fs.Debugf(f, "Getting upload block %p", buf)
return buf
@@ -393,7 +503,7 @@ func (f *Fs) getUploadBlock() []byte {
// putUploadBlock returns a block to the pool of size chunkSize
func (f *Fs) putUploadBlock(buf []byte) {
buf = buf[:cap(buf)]
if len(buf) != int(chunkSize) {
if len(buf) != int(f.opt.ChunkSize) {
panic("bad blocksize returned to pool")
}
// fs.Debugf(f, "Returning upload block %p", buf)
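
A standalone sketch of the buffer-token pattern used by fillBufferTokens, getUploadBlock and putUploadBlock above: a buffered channel holds one slot per transfer, buffers are allocated lazily on first use and recycled afterwards. The tiny chunk size is only for the example.

package main

import "fmt"

func main() {
	const transfers = 4 // one slot per concurrent transfer
	const chunkSize = 8 // tiny for the example; the backend uses opt.ChunkSize

	tokens := make(chan []byte, transfers)
	for i := 0; i < transfers; i++ {
		tokens <- nil // empty slots - no memory allocated yet
	}

	get := func() []byte {
		buf := <-tokens
		if buf == nil {
			buf = make([]byte, chunkSize) // allocate on first use
		}
		return buf
	}
	put := func(buf []byte) { tokens <- buf[:cap(buf)] }

	buf := get()
	fmt.Println(len(buf), cap(buf)) // 8 8
	put(buf)                        // block goes back into the pool for reuse
}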
@@ -563,7 +673,7 @@ func (f *Fs) markBucketOK() {
// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
last := ""
err = f.list(dir, false, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
err = f.list(dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
if err != nil {
return err
@@ -635,7 +745,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
}
list := walk.NewListRHelper(callback)
last := ""
err = f.list(dir, true, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
err = f.list(dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
if err != nil {
return err
@@ -655,7 +765,11 @@ type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(fn listBucketFn) error {
var account = api.Account{ID: f.info.AccountID}
var account = api.ListBucketsRequest{
AccountID: f.info.AccountID,
BucketID: f.info.Allowed.BucketID,
}
var response api.ListBucketsResponse
opts := rest.Opts{
Method: "POST",
@@ -879,6 +993,12 @@ func (f *Fs) purge(oldOnly bool) error {
errReturn = err
}
}
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
if time.Since(time.Time(timestamp)).Hours() > 24 {
return true
}
return false
}
// Delete Config.Transfers in parallel
toBeDeleted := make(chan *api.File, fs.Config.Transfers)
@@ -902,6 +1022,9 @@ func (f *Fs) purge(oldOnly bool) error {
if object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
toBeDeleted <- object
} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
toBeDeleted <- object
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
}
@@ -1035,12 +1158,12 @@ func (o *Object) readMetaData() (err error) {
maxSearched := 1
var timestamp api.Timestamp
baseRemote := o.remote
if *b2Versions {
if o.fs.opt.Versions {
timestamp, baseRemote = api.RemoveVersion(baseRemote)
maxSearched = maxVersions
}
var info *api.File
err = o.fs.list("", true, baseRemote, maxSearched, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
err = o.fs.list("", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
if isDirectory {
return nil
}
@@ -1254,7 +1377,7 @@ func urlEncode(in string) string {
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if *b2Versions {
if o.fs.opt.Versions {
return errNotWithVersions
}
err = o.fs.Mkdir("")
@@ -1289,7 +1412,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
} else {
return err
}
} else if size > int64(uploadCutoff) {
} else if size > int64(o.fs.opt.UploadCutoff) {
up, err := o.fs.newLargeUpload(o, in, src)
if err != nil {
return err
@@ -1383,11 +1506,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
},
ContentLength: &size,
}
// for go1.8 (see release notes) we must nil the Body if we want a
// "Content-Length: 0" header which b2 requires for all files.
if size == 0 {
opts.Body = nil
}
var response api.FileInfo
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1408,10 +1526,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// Remove an object
func (o *Object) Remove() error {
if *b2Versions {
if o.fs.opt.Versions {
return errNotWithVersions
}
if *b2HardDelete {
if o.fs.opt.HardDelete {
return o.fs.deleteByID(o.id, o.fs.root+o.remote)
}
return o.fs.hide(o.fs.root + o.remote)
@@ -1422,6 +1540,11 @@ func (o *Object) MimeType() string {
return o.mimeType
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.id
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
@@ -1431,4 +1554,5 @@ var (
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)


@@ -1,10 +1,10 @@
// Test B2 filesystem interface
package b2_test
package b2
import (
"testing"
"github.com/ncw/rclone/backend/b2"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -12,6 +12,23 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestB2:",
NilObject: (*b2.Object)(nil),
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
NeedMultipleChunks: true,
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)


@@ -86,10 +86,10 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
parts := int64(0)
sha1SliceSize := int64(maxParts)
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), fs.SizeSuffix(maxParts*chunkSize))
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts = size / int64(o.fs.opt.ChunkSize)
if size%int64(o.fs.opt.ChunkSize) != 0 {
parts++
}
if parts > maxParts {
@@ -116,8 +116,10 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
},
}
// Set the SHA1 if known
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
if !o.fs.opt.DisableCheckSum {
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
@@ -357,7 +359,8 @@ outer:
buf := up.f.getUploadBlock()
// Read the chunk
n, err := io.ReadFull(up.in, buf)
var n int
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
@@ -366,7 +369,6 @@ outer:
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putUploadBlock(buf)
hasMoreParts = false
err = nil
break outer
} else if err != nil {
@@ -409,8 +411,8 @@ outer:
}
reqSize := remaining
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
if reqSize >= int64(up.f.opt.ChunkSize) {
reqSize = int64(up.f.opt.ChunkSize)
}
// Get a block of memory


@@ -61,7 +61,7 @@ func (e *Error) Error() string {
var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
// Types of things in Item
const (
@@ -86,6 +86,10 @@ type Item struct {
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
}
// ModTime returns the modification time of the item
@@ -145,6 +149,14 @@ type CopyFile struct {
Parent Parent `json:"parent"`
}
// CreateSharedLink is the request for Public Link
type CreateSharedLink struct {
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
}
// UploadSessionRequest is used in Create Upload Session
type UploadSessionRequest struct {
FolderID string `json:"folder_id,omitempty"` // don't pass for update
@@ -172,8 +184,8 @@ type UploadSessionResponse struct {
// Part defines the return from upload part call which are passed to commit upload also
type Part struct {
PartID string `json:"part_id"`
Offset int `json:"offset"`
Size int `json:"size"`
Offset int64 `json:"offset"`
Size int64 `json:"size"`
Sha1 string `json:"sha1"`
}


@@ -16,7 +16,6 @@ import (
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"time"
@@ -24,7 +23,8 @@ import (
"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
@@ -47,6 +47,7 @@ const (
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024
)
// Globals
@@ -62,7 +63,6 @@ var (
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
uploadCutoff = fs.SizeSuffix(50 * 1024 * 1024)
)
// Register with Fs
@@ -71,27 +71,43 @@ func init() {
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.Config("box", name, oauthConfig)
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("box", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Box App Client Id - leave blank normally.",
Help: "Box App Client Id.\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Box App Client Secret - leave blank normally.",
Help: "Box App Client Secret\nLeave blank normally.",
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50MB).",
Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true,
}, {
Name: "commit_retries",
Help: "Max number of times to try committing a multipart file.",
Default: 100,
Advanced: true,
}},
})
flags.VarP(&uploadCutoff, "box-upload-cutoff", "", "Cutoff for switching to multipart upload")
}
// Options defines the configuration for this backend
type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CommitRetries int `config:"commit_retries"`
}
// Fs represents a remote box
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
@@ -110,6 +126,7 @@ type Object struct {
size int64 // size of the object
modTime time.Time // modification time of the object
id string // ID of the object
publicLink string // Public Link for the object
sha1 string // SHA-1 of the object content
}
@@ -135,9 +152,6 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// Pattern to match a box path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses a box 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
@@ -223,20 +237,28 @@ func errorHandler(resp *http.Response) error {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
if uploadCutoff < minUploadCutoff {
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", uploadCutoff, fs.SizeSuffix(minUploadCutoff))
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.UploadCutoff < minUploadCutoff {
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
}
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure Box: %v", err)
return nil, errors.Wrap(err, "failed to configure Box")
}
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
@@ -261,16 +283,16 @@ func NewFs(name, root string) (fs.Fs, error) {
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
newF := *f
newF.dirCache = dircache.New(newRoot, rootID, &newF)
newF.root = newRoot
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = newF.dirCache.FindRoot(false)
err = tempF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := newF.newObjectWithInfo(remote, nil)
_, err := tempF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -278,8 +300,14 @@ func NewFs(name, root string) (fs.Fs, error) {
}
return nil, err
}
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return &newF, fs.ErrorIsFile
return f, fs.ErrorIsFile
}
return f, nil
}
@@ -653,7 +681,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
Parameters: fieldsValue(),
}
replacedLeaf := replaceReservedChars(leaf)
copy := api.CopyFile{
copyFile := api.CopyFile{
Name: replacedLeaf,
Parent: api.Parent{
ID: directoryID,
@@ -662,7 +690,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
var resp *http.Response
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &copy, &info)
resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -823,6 +851,46 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
return nil
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(remote string) (string, error) {
id, err := f.dirCache.FindDir(remote, false)
var opts rest.Opts
if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote)
opts = rest.Opts{
Method: "PUT",
Path: "/folders/" + id,
Parameters: fieldsValue(),
}
} else {
fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(remote)
if err != nil {
return "", err
}
if o.(*Object).publicLink != "" {
return o.(*Object).publicLink, nil
}
opts = rest.Opts{
Method: "PUT",
Path: "/files/" + o.(*Object).id,
Parameters: fieldsValue(),
}
}
shareLink := api.CreateSharedLink{}
var info api.Item
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
return shouldRetry(resp, err)
})
return info.SharedLink.URL, err
}
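
A caller-side sketch, not rclone code: it assumes an fs.Fs obtained from NewFs and relies only on the fs.PublicLinker interface the backend declares it satisfies below. Imports needed: errors and github.com/ncw/rclone/fs.

func shareLink(f fs.Fs, remote string) (string, error) {
	linker, ok := f.(fs.PublicLinker)
	if !ok {
		return "", errors.New("remote does not support public links")
	}
	return linker.PublicLink(remote) // works for both files and directories
}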
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
@@ -887,6 +955,7 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
o.sha1 = info.SHA1
o.modTime = info.ModTime()
o.id = info.ID
o.publicLink = info.SharedLink.URL
return nil
}
@@ -993,8 +1062,8 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
var resp *http.Response
var result api.FolderItems
opts := rest.Opts{
Method: "POST",
Body: in,
Method: "POST",
Body: in,
MultipartMetadataName: "attributes",
MultipartContentName: "contents",
MultipartFileName: upload.Name,
@@ -1039,7 +1108,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
// Upload with simple or multipart
if size <= int64(uploadCutoff) {
if size <= int64(o.fs.opt.UploadCutoff) {
err = o.upload(in, leaf, directoryID, modTime)
} else {
err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
@@ -1052,6 +1121,11 @@ func (o *Object) Remove() error {
return o.fs.deleteObject(o.id)
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.id
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1061,5 +1135,7 @@ var (
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)


@@ -96,7 +96,9 @@ func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.T
request.Attributes.ContentCreatedAt = api.Time(modTime)
var body []byte
var resp *http.Response
maxTries := fs.Config.LowLevelRetries
// For discussion of this value see:
// https://github.com/ncw/rclone/issues/2054
maxTries := o.fs.opt.CommitRetries
const defaultDelay = 10
var tries int
outer:

backend/cache/cache.go

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -23,7 +23,7 @@ func (r *run) mountFs(t *testing.T, f fs.Fs) {
fuse.FSName(device), fuse.VolumeName(device),
fuse.NoAppleDouble(),
fuse.NoAppleXattr(),
fuse.AllowOther(),
//fuse.AllowOther(),
}
err := os.MkdirAll(r.mntDir, os.ModePerm)
require.NoError(t, err)

backend/cache/cache_upload_test.go (new file)

@@ -0,0 +1,454 @@
// +build !plan9
package cache_test
import (
"fmt"
"math/rand"
"os"
"path"
"strconv"
"testing"
"time"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)
func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
}
func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
// create some rand test data
testSize := int64(524288000)
testReader := runInstance.randomReader(t, testSize)
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(524416032), ti.Size())
} else {
require.Equal(t, testSize, ti.Size())
}
de1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, de1, 1)
runInstance.completeBackgroundUpload(t, "one", bu)
// check if it was removed from temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
// check if it can be read
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
require.NoError(t, err)
require.Len(t, data2, 1024)
}
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
testSize := int64(10485760)
testReader := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)
time.Sleep(time.Second * 5)
//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
//require.NoError(t, err)
err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
require.NoError(t, err)
// check if it can be read
de1, err = runInstance.list(t, rootFs, "second/test")
require.NoError(t, err)
require.Len(t, de1, 1)
}
func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
testSize := int64(1048576)
testReader := runInstance.randomReader(t, testSize)
testReader2 := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
require.False(t, os.IsNotExist(err))
runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
require.True(t, os.IsNotExist(err))
de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)
// check if it can be read
de1, err = runInstance.list(t, rootFs, "second")
require.NoError(t, err)
require.Len(t, de1, 1)
}
func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
totalFiles := 10
rand.Seed(time.Now().Unix())
lastFile := ""
for i := 0; i < totalFiles; i++ {
size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
require.NoError(t, err)
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
if runInstance.wrappedIsExternal && i < totalFiles-1 {
time.Sleep(time.Second * 3)
}
lastFile = remote
}
// check if cache lists all files, likely temp upload didn't finish yet
de1, err := runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
// wait for background uploader to do its thing
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
// retry until we have no more temp files and fail if they don't go down to 0
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
require.True(t, os.IsNotExist(err))
// check if cache lists all files
de1, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
}
func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
// test DirMove - allowed
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("second/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.NoError(t, err)
_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.Error(t, err)
var started bool
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
require.NoError(t, err)
require.False(t, started)
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}
// test Rmdir - allowed
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
require.Contains(t, err.Error(), "directory not empty")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.False(t, started)
require.NoError(t, err)
// test Move/Rename -- allowed
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.NoError(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/second")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}
// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}
// test Remove -- allowed
err = runInstance.rm(t, rootFs, "test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// test Update -- allowed
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
require.NoError(t, err)
obj2, err := rootFs.NewObject("test/one")
require.NoError(t, err)
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
require.Equal(t, "one content updated", string(data2))
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(67), tmpInfo.Size())
} else {
require.Equal(t, int64(len(data2)), tmpInfo.Size())
}
// test SetModTime -- allowed
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
require.NotEqual(t, secondModTime, firstModTime)
require.NotEqual(t, time.Time{}, firstModTime)
require.NotEqual(t, time.Time{}, secondModTime)
}
func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
// create some test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
require.NoError(t, err)
// test DirMove
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.Error(t, err)
}
// test Rmdir
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
// test Move/Rename
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.Error(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/second")
require.Error(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.Error(t, err)
}
// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}
// test Remove
err = runInstance.rm(t, rootFs, "test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// test Update - this seems to work. Why? FIXME
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
// require.Equal(t, "one content", string(data2))
//
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
// require.NoError(t, err)
// if runInstance.rootIsCrypt {
// require.Equal(t, int64(67), tmpInfo.Size())
// } else {
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
// }
//})
//require.Error(t, err)
// test SetModTime -- seems to work because of the previous step
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//require.Equal(t, secondModTime, firstModTime)
//require.NotEqual(t, time.Time{}, firstModTime)
//require.NotEqual(t, time.Time{}, secondModTime)
}


@@ -3,16 +3,15 @@
package cache
import (
"time"
"path"
"time"
"github.com/ncw/rclone/fs"
)
// Directory is a generic dir that stores basic information about it
type Directory struct {
fs.Directory `json:"-"`
Directory fs.Directory `json:"-"` // can be nil
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
@@ -125,6 +124,14 @@ func (d *Directory) Items() int64 {
return d.CacheItems
}
// ID returns the ID of the cached directory if known
func (d *Directory) ID() string {
if d.Directory == nil {
return ""
}
return d.Directory.ID()
}
var (
_ fs.Directory = (*Directory)(nil)
)


@@ -5,12 +5,11 @@ package cache
import (
"fmt"
"io"
"sync"
"time"
"path"
"runtime"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
@@ -49,12 +48,13 @@ type Handle struct {
offset int64
seenOffsets map[int64]bool
mu sync.Mutex
workersWg sync.WaitGroup
confirmReading chan bool
UseMemory bool
workers []*worker
closed bool
reading bool
workers int
maxWorkerID int
UseMemory bool
closed bool
reading bool
}
// NewObjectHandle returns a new Handle for an existing Object
@@ -65,14 +65,14 @@ func NewObjectHandle(o *Object, cfs *Fs) *Handle {
offset: 0,
preloadOffset: -1, // -1 to trigger the first preload
UseMemory: cfs.chunkMemory,
UseMemory: !cfs.opt.ChunkNoMemory,
reading: false,
}
r.seenOffsets = make(map[int64]bool)
r.memory = NewMemory(-1)
// create a larger buffer to queue up requests
r.preloadQueue = make(chan int64, r.cfs.totalWorkers*10)
r.preloadQueue = make(chan int64, r.cfs.opt.TotalWorkers*10)
r.confirmReading = make(chan bool)
r.startReadWorkers()
return r
@@ -95,10 +95,10 @@ func (r *Handle) String() string {
// startReadWorkers will start the worker pool
func (r *Handle) startReadWorkers() {
if r.hasAtLeastOneWorker() {
if r.workers > 0 {
return
}
totalWorkers := r.cacheFs().totalWorkers
totalWorkers := r.cacheFs().opt.TotalWorkers
if r.cacheFs().plexConnector.isConfigured() {
if !r.cacheFs().plexConnector.isConnected() {
@@ -117,26 +117,27 @@ func (r *Handle) startReadWorkers() {
// scaleWorkers will scale the worker pool up or down to the desired count
func (r *Handle) scaleWorkers(desired int) {
current := len(r.workers)
current := r.workers
if current == desired {
return
}
if current > desired {
// scale in gracefully
for i := 0; i < current-desired; i++ {
for r.workers > desired {
r.preloadQueue <- -1
r.workers--
}
} else {
// scale out
for i := 0; i < desired-current; i++ {
for r.workers < desired {
w := &worker{
r: r,
ch: r.preloadQueue,
id: current + i,
id: r.maxWorkerID,
}
r.workersWg.Add(1)
r.workers++
r.maxWorkerID++
go w.run()
r.workers = append(r.workers, w)
}
}
// ignore first scale out from 0
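For intuition, the scale-in path above retires workers by pushing the sentinel value -1 onto the shared preload queue, while scale-out simply starts more goroutines reading from the same queue. A minimal standalone sketch of that pattern (names and values here are illustrative, not the cache package's API):

package main

import (
	"fmt"
	"sync"
)

// worker drains offsets from the queue and exits when it sees the -1 sentinel.
func worker(id int, queue <-chan int64, wg *sync.WaitGroup) {
	defer wg.Done()
	for offset := range queue {
		if offset < 0 {
			return // scale-in: one sentinel retires exactly one worker
		}
		fmt.Printf("worker %d: preload offset %d\n", id, offset)
	}
}

func main() {
	queue := make(chan int64, 16)
	var wg sync.WaitGroup
	workers := 0
	scale := func(desired int) {
		for workers > desired { // scale in gracefully
			queue <- -1
			workers--
		}
		for workers < desired { // scale out
			wg.Add(1)
			go worker(workers, queue, &wg)
			workers++
		}
	}
	scale(4)         // start four workers
	queue <- 0       // queue a couple of chunk offsets
	queue <- 5242880
	scale(1)         // three workers will each consume a -1 and exit
	close(queue)     // the remaining worker drains the queue and returns
	wg.Wait()
}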
@@ -147,17 +148,16 @@ func (r *Handle) scaleWorkers(desired int) {
func (r *Handle) confirmExternalReading() {
// if we have a max value of workers
// or there's no external confirmation available
// then we skip this step
if len(r.workers) > 1 ||
!r.cacheFs().plexConnector.isConnected() {
if r.workers > 1 ||
!r.cacheFs().plexConnector.isConfigured() {
return
}
if !r.cacheFs().plexConnector.isPlaying(r.cachedObject) {
return
}
fs.Infof(r, "confirmed reading by external reader")
r.scaleWorkers(r.cacheFs().totalMaxWorkers)
r.scaleWorkers(r.cacheFs().opt.TotalWorkers)
}
// queueOffset will send an offset to the workers if it's different from the last one
@@ -179,8 +179,8 @@ func (r *Handle) queueOffset(offset int64) {
}
}
for i := 0; i < len(r.workers); i++ {
o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
for i := 0; i < r.workers; i++ {
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
if o < 0 || o >= r.cachedObject.Size() {
continue
}
@@ -194,16 +194,6 @@ func (r *Handle) queueOffset(offset int64) {
}
}
func (r *Handle) hasAtLeastOneWorker() bool {
oneWorker := false
for i := 0; i < len(r.workers); i++ {
if r.workers[i].isRunning() {
oneWorker = true
}
}
return oneWorker
}
// getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
// it can be from transient or persistent cache
// it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
@@ -212,7 +202,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
var err error
// we calculate the modulus of the requested offset with the size of a chunk
offset := chunkStart % r.cacheFs().chunkSize
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
// we align the start offset of the first chunk to a likely chunk in the storage
chunkStart = chunkStart - offset
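The two comments above describe a simple alignment step: the requested offset is reduced modulo the chunk size, and the remainder is kept so the right slice of the stored chunk can be returned. A small hedged sketch of that arithmetic (5 MiB is assumed here; the real value comes from the backend's ChunkSize option shown above):

package main

import "fmt"

const chunkSize int64 = 5 * 1024 * 1024 // assumed 5 MiB chunk size for illustration

// alignToChunk maps an arbitrary read offset onto the cache's chunk grid.
func alignToChunk(readOffset int64) (chunkStart, offsetInChunk int64) {
	offsetInChunk = readOffset % chunkSize  // how far into a stored chunk the read starts
	chunkStart = readOffset - offsetInChunk // start offset of the chunk that holds the read
	return chunkStart, offsetInChunk
}

func main() {
	start, off := alignToChunk(12 * 1024 * 1024)
	fmt.Println(start, off) // 10485760 2097152: the read begins 2 MiB into the chunk stored at 10 MiB
}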
@@ -229,7 +219,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
if !found {
// we're going to give the workers a chance to pick up the chunk
// and retry a couple of times
for i := 0; i < r.cacheFs().readRetries*8; i++ {
for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true
@@ -244,7 +234,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
// not found in RAM or
// the worker didn't manage to download the chunk in time so we abort and close the stream
if err != nil || len(data) == 0 || !found {
if !r.hasAtLeastOneWorker() {
if r.workers == 0 {
fs.Errorf(r, "out of workers")
return nil, io.ErrUnexpectedEOF
}
@@ -256,7 +246,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
if offset > 0 {
if offset > int64(len(data)) {
fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
r.offset, chunkStart, len(data), offset, r.cacheFs().chunkSize, r.cachedObject.Size())
r.offset, chunkStart, len(data), offset, r.cacheFs().opt.ChunkSize, r.cachedObject.Size())
return nil, io.ErrUnexpectedEOF
}
data = data[int(offset):]
@@ -305,14 +295,7 @@ func (r *Handle) Close() error {
close(r.preloadQueue)
r.closed = true
// wait for workers to complete their jobs before returning
waitCount := 3
for i := 0; i < len(r.workers); i++ {
waitIdx := 0
for r.workers[i].isRunning() && waitIdx < waitCount {
time.Sleep(time.Second)
waitIdx++
}
}
r.workersWg.Wait()
r.memory.db.Flush()
fs.Debugf(r, "cache reader closed %v", r.offset)
@@ -339,9 +322,9 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
}
chunkStart := r.offset - (r.offset % r.cacheFs().chunkSize)
if chunkStart >= r.cacheFs().chunkSize {
chunkStart = chunkStart - r.cacheFs().chunkSize
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
}
r.queueOffset(chunkStart)
@@ -349,12 +332,9 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
}
type worker struct {
r *Handle
ch <-chan int64
rc io.ReadCloser
id int
running bool
mu sync.Mutex
r *Handle
rc io.ReadCloser
id int
}
// String is a representation of this worker
@@ -399,33 +379,19 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
})
}
func (w *worker) isRunning() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.running
}
func (w *worker) setRunning(f bool) {
w.mu.Lock()
defer w.mu.Unlock()
w.running = f
}
// run is the main loop for the worker which receives offsets to preload
func (w *worker) run() {
var err error
var data []byte
defer w.setRunning(false)
defer func() {
if w.rc != nil {
_ = w.rc.Close()
w.setRunning(false)
}
w.r.workersWg.Done()
}()
for {
chunkStart, open := <-w.ch
w.setRunning(true)
chunkStart, open := <-w.r.preloadQueue
if chunkStart < 0 || !open {
break
}
@@ -446,14 +412,13 @@ func (w *worker) run() {
continue
}
}
err = nil
} else {
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
}
}
chunkEnd := chunkStart + w.r.cacheFs().chunkSize
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
// TODO: Remove this comment if it proves to be reliable for #1896
//if chunkEnd > w.r.cachedObject.Size() {
// chunkEnd = w.r.cachedObject.Size()
@@ -468,7 +433,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
var data []byte
// stop retries
if retry >= w.r.cacheFs().readRetries {
if retry >= w.r.cacheFs().opt.ReadRetries {
return
}
// back-off between retries
@@ -493,7 +458,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
}
data = make([]byte, chunkEnd-chunkStart)
sourceRead := 0
var sourceRead int
sourceRead, err = io.ReadFull(w.rc, data)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
@@ -614,7 +579,7 @@ func (b *backgroundWriter) run() {
return
}
absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), b.fs.tempWriteWait)
absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), time.Duration(b.fs.opt.TempWaitTime))
if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) {
time.Sleep(time.Second)
continue
@@ -630,6 +595,23 @@ func (b *backgroundWriter) run() {
fs.Errorf(remote, "background upload: %v", err)
continue
}
// clean empty dirs up to root
thisDir := cleanPath(path.Dir(remote))
for thisDir != "" {
thisList, err := b.fs.tempFs.List(thisDir)
if err != nil {
break
}
if len(thisList) > 0 {
break
}
err = b.fs.tempFs.Rmdir(thisDir)
fs.Debugf(thisDir, "cleaned from temp path")
if err != nil {
break
}
thisDir = cleanPath(path.Dir(thisDir))
}
fs.Infof(remote, "background upload: uploaded entry")
err = b.fs.cache.removePendingUpload(absPath)
if err != nil && !strings.Contains(err.Error(), "pending upload not found") {


@@ -44,7 +44,7 @@ func NewObject(f *Fs, remote string) *Object {
cacheType := objectInCache
parentFs := f.UnWrap()
if f.tempWritePath != "" {
if f.opt.TempWritePath != "" {
_, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload
cacheType = objectPendingUpload
@@ -75,7 +75,7 @@ func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
cacheType := objectInCache
parentFs := f.UnWrap()
if f.tempWritePath != "" {
if f.opt.TempWritePath != "" {
_, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload
cacheType = objectPendingUpload
@@ -153,7 +153,7 @@ func (o *Object) Storable() bool {
// 2. is not pending a notification from the wrapped fs
func (o *Object) refresh() error {
isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
isExpired := time.Now().After(o.CacheTs.Add(o.CacheFs.fileAge))
isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
if !isExpired && !isNotified {
return nil
}
@@ -208,11 +208,17 @@ func (o *Object) SetModTime(t time.Time) error {
// Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
if err := o.refreshFromSource(true); err != nil {
var err error
if o.Object == nil {
err = o.refreshFromSource(true)
} else {
err = o.refresh()
}
if err != nil {
return nil, err
}
var err error
cacheReader := NewObjectHandle(o, o.CacheFs)
var offset, limit int64 = 0, -1
for _, option := range options {
@@ -237,7 +243,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return err
}
// pause background uploads if active
if o.CacheFs.tempWritePath != "" {
if o.CacheFs.opt.TempWritePath != "" {
o.CacheFs.backgroundRunner.pause()
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
@@ -256,6 +262,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// deleting cached chunks and info to be replaced with new ones
_ = o.CacheFs.cache.RemoveObject(o.abs())
// advertise to ChangeNotify if wrapped doesn't do that
o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
o.CacheModTime = src.ModTime().UnixNano()
o.CacheSize = src.Size()
@@ -272,7 +280,7 @@ func (o *Object) Remove() error {
return err
}
// pause background uploads if active
if o.CacheFs.tempWritePath != "" {
if o.CacheFs.opt.TempWritePath != "" {
o.CacheFs.backgroundRunner.pause()
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
@@ -351,6 +359,13 @@ func (o *Object) tempFileStartedUpload() bool {
return started
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
return o.Object
}
var (
_ fs.Object = (*Object)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
)

backend/cache/plex.go

@@ -3,21 +3,19 @@
package cache
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"sync"
"time"
"sync"
"bytes"
"io/ioutil"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/patrickmn/go-cache"
cache "github.com/patrickmn/go-cache"
"golang.org/x/net/websocket"
)
@@ -55,15 +53,17 @@ type plexConnector struct {
username string
password string
token string
insecure bool
f *Fs
mu sync.Mutex
running bool
runningMu sync.Mutex
stateCache *cache.Cache
saveToken func(string)
}
// newPlexConnector connects to a Plex server and generates a token
func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) {
func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool, saveToken func(string)) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
@@ -75,14 +75,16 @@ func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector
username: username,
password: password,
token: "",
insecure: insecure,
stateCache: cache.New(time.Hour, time.Minute),
saveToken: saveToken,
}
return pc, nil
}
// newPlexConnectorWithToken connects to a Plex server using an existing token
func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
@@ -92,6 +94,7 @@ func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, er
f: f,
url: u,
token: token,
insecure: insecure,
stateCache: cache.New(time.Hour, time.Minute),
}
pc.listenWebsocket()
@@ -106,11 +109,26 @@ func (p *plexConnector) closeWebsocket() {
p.running = false
}
func (p *plexConnector) websocketDial() (*websocket.Conn, error) {
u := strings.TrimRight(strings.Replace(strings.Replace(
p.url.String(), "http://", "ws://", 1), "https://", "wss://", 1), "/")
url := fmt.Sprintf(defPlexNotificationURL, u, p.token)
config, err := websocket.NewConfig(url, "http://localhost")
if err != nil {
return nil, err
}
if p.insecure {
config.TlsConfig = &tls.Config{InsecureSkipVerify: true}
}
return websocket.DialConfig(config)
}
func (p *plexConnector) listenWebsocket() {
u := strings.Replace(p.url.String(), "http://", "ws://", 1)
u = strings.Replace(u, "https://", "ws://", 1)
conn, err := websocket.Dial(fmt.Sprintf(defPlexNotificationURL, strings.TrimRight(u, "/"), p.token),
"", "http://localhost")
p.runningMu.Lock()
defer p.runningMu.Unlock()
conn, err := p.websocketDial()
if err != nil {
fs.Errorf("plex", "%v", err)
return
@@ -127,8 +145,8 @@ func (p *plexConnector) listenWebsocket() {
err := websocket.JSON.Receive(conn, notif)
if err != nil {
fs.Debugf("plex", "%v", err)
time.Sleep(time.Second)
continue
p.closeWebsocket()
break
}
// we're only interested in play events
if notif.Container.Type == "playing" {
@@ -206,8 +224,9 @@ func (p *plexConnector) authenticate() error {
}
p.token = token
if p.token != "" {
config.FileSet(p.f.Name(), "plex_token", p.token)
config.SaveConfig()
if p.saveToken != nil {
p.saveToken(p.token)
}
fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
}
p.listenWebsocket()
@@ -229,6 +248,9 @@ func (p *plexConnector) isConfigured() bool {
func (p *plexConnector) isPlaying(co *Object) bool {
var err error
if !p.isConnected() {
p.listenWebsocket()
}
remote := co.Remote()
if cr, yes := p.f.isWrappedByCrypt(); yes {


@@ -8,7 +8,7 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/patrickmn/go-cache"
cache "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
)


@@ -3,20 +3,17 @@
package cache
import (
"time"
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"sync"
"io/ioutil"
"fmt"
"time"
bolt "github.com/coreos/bbolt"
"github.com/ncw/rclone/fs"
@@ -34,7 +31,8 @@ const (
// Features flags for this storage type
type Features struct {
PurgeDb bool // purge the db before starting
PurgeDb bool // purge the db before starting
DbWaitTime time.Duration // time to wait for DB to be available
}
var boltMap = make(map[string]*Persistent)
@@ -122,7 +120,7 @@ func (b *Persistent) connect() error {
if err != nil {
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
}
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: *cacheDbWaitTime})
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
if err != nil {
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
}
@@ -192,19 +190,46 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
// AddDir will update a CachedDirectory metadata and all its entries
func (b *Persistent) AddDir(cachedDir *Directory) error {
return b.AddBatchDir([]*Directory{cachedDir})
}
// AddBatchDir will update a list of CachedDirectory metadata and all their entries
func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
if len(cachedDirs) == 0 {
return nil
}
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedDir.abs(), true, tx)
var bucket *bolt.Bucket
if cachedDirs[0].Dir == "" {
bucket = tx.Bucket([]byte(RootBucket))
} else {
bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
}
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", cachedDir)
return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
}
encoded, err := json.Marshal(cachedDir)
if err != nil {
return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
}
err = bucket.Put([]byte("."), encoded)
if err != nil {
return err
for _, cachedDir := range cachedDirs {
var b *bolt.Bucket
var err error
if cachedDir.Name == "" {
b = bucket
} else {
b, err = bucket.CreateBucketIfNotExists([]byte(cachedDir.Name))
}
if err != nil {
return err
}
encoded, err := json.Marshal(cachedDir)
if err != nil {
return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
}
err = b.Put([]byte("."), encoded)
if err != nil {
return err
}
}
return nil
})
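AddBatchDir above writes a whole batch of directory entries inside one bolt transaction, creating (or reusing) a child bucket per directory and storing the metadata under the "." key. A rough standalone sketch of the same bbolt pattern, with illustrative bucket names and payloads:

package main

import (
	"log"

	bolt "github.com/coreos/bbolt"
)

func main() {
	db, err := bolt.Open("example.db", 0644, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = db.Close() }()

	names := []string{"music", "photos", "videos"}
	err = db.Update(func(tx *bolt.Tx) error {
		parent, err := tx.CreateBucketIfNotExists([]byte("root"))
		if err != nil {
			return err
		}
		// one transaction covers the whole batch: a child bucket per directory,
		// with the directory's own metadata stored under the "." key
		for _, name := range names {
			child, err := parent.CreateBucketIfNotExists([]byte(name))
			if err != nil {
				return err
			}
			if err := child.Put([]byte("."), []byte(`{"name":"`+name+`"}`)); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}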
@@ -315,7 +340,7 @@ func (b *Persistent) RemoveDir(fp string) error {
// ExpireDir will flush a CachedDirectory and all its objects from the objects
// chunks will remain as they are
func (b *Persistent) ExpireDir(cd *Directory) error {
t := time.Now().Add(cd.CacheFs.fileAge * -1)
t := time.Now().Add(time.Duration(-cd.CacheFs.opt.InfoAge))
cd.CacheTs = &t
// expire all parents
@@ -402,7 +427,7 @@ func (b *Persistent) RemoveObject(fp string) error {
// ExpireObject will flush an Object and all its data if desired
func (b *Persistent) ExpireObject(co *Object, withData bool) error {
co.CacheTs = time.Now().Add(co.CacheFs.fileAge * -1)
co.CacheTs = time.Now().Add(time.Duration(-co.CacheFs.opt.InfoAge))
err := b.AddObject(co)
if withData {
_ = os.RemoveAll(path.Join(b.dataPath, co.abs()))
@@ -1070,10 +1095,3 @@ func itob(v int64) []byte {
func btoi(d []byte) int64 {
return int64(binary.BigEndian.Uint64(d))
}
// cloneBytes returns a copy of a given slice.
func cloneBytes(v []byte) []byte {
var clone = make([]byte, len(v))
copy(clone, v)
return clone
}


@@ -17,11 +17,9 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/pkg/errors"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt"
"github.com/rfjakob/eme"
)
// Constants
@@ -43,6 +41,7 @@ var (
ErrorBadDecryptControlChar = errors.New("bad decryption - contains control chars")
ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
ErrorTooShortAfterDecode = errors.New("too short after base32 decode")
ErrorTooLongAfterDecode = errors.New("too long after base32 decode")
ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted")
ErrorEncryptedFileBadHeader = errors.New("file has truncated block header")
ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string")
@@ -145,6 +144,7 @@ type cipher struct {
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
passCorrupted bool
}
// newCipher initialises the cipher. If salt is "" then it uses a built-in salt value
@@ -164,6 +164,11 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
return c, nil
}
// setPassCorrupted sets whether corrupted blocks should be passed through to the output
func (c *cipher) setPassCorrupted(passCorrupted bool) {
c.passCorrupted = passCorrupted
}
// Key creates all the internal keys from the password passed in using
// scrypt.
//
@@ -286,6 +291,9 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
// not possible if decodeFilename() working correctly
return "", ErrorTooShortAfterDecode
}
if len(rawCiphertext) > 2048 {
return "", ErrorTooLongAfterDecode
}
paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
if err != nil {
@@ -820,7 +828,10 @@ func (fh *decrypter) fillBuffer() (err error) {
if err != nil {
return err // return pending error as it is likely more accurate
}
return ErrorEncryptedBadBlock
if !fh.c.passCorrupted {
return ErrorEncryptedBadBlock
}
fs.Errorf(nil, "passing corrupted block")
}
fh.bufIndex = 0
fh.bufSize = n - blockHeaderSize


@@ -24,7 +24,7 @@ func TestNewNameEncryptionMode(t *testing.T) {
{"off", NameEncryptionOff, ""},
{"standard", NameEncryptionStandard, ""},
{"obfuscate", NameEncryptionObfuscated, ""},
{"potato", NameEncryptionMode(0), "Unknown file name encryption mode \"potato\""},
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
} {
actual, actualErr := NewNameEncryptionMode(test.in)
assert.Equal(t, actual, test.expected)
@@ -194,6 +194,10 @@ func TestEncryptSegment(t *testing.T) {
func TestDecryptSegment(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 3328)
for i := range longName {
longName[i] = 'a'
}
c, _ := newCipher(NameEncryptionStandard, "", "", true)
for _, test := range []struct {
in string
@@ -201,6 +205,7 @@ func TestDecryptSegment(t *testing.T) {
}{
{"64=", ErrorBadBase32Encoding},
{"!", base32.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},


@@ -4,25 +4,19 @@ package crypt
import (
"fmt"
"io"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
)
// Globals
var (
// Flags
cryptShowMapping = flags.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -30,11 +24,13 @@ func init() {
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "filename_encryption",
Help: "How to encrypt the filenames.",
Name: "filename_encryption",
Help: "How to encrypt the filenames.",
Default: "standard",
Examples: []fs.OptionExample{
{
Value: "off",
@@ -48,8 +44,9 @@ func init() {
},
},
}, {
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Default: true,
Examples: []fs.OptionExample{
{
Value: "true",
@@ -68,68 +65,108 @@ func init() {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
Optional: true,
}, {
Name: "show_mapping",
Help: `For all files listed show how the names encrypt.
If this flag is set then for each file that the remote is asked to
list, it will log (at level INFO) a line stating the decrypted file
name and the encrypted file name.
This is so you can work out which encrypted names correspond to which
decrypted names, in case you need to do something with the encrypted
file names or for debugging purposes.`,
Default: false,
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "pass_corrupted_blocks",
Help: `Pass through corrupted blocks to the output.
This is for debugging corruption problems in crypt - it shouldn't be needed normally.
`,
Default: false,
Hide: fs.OptionHideConfigurator,
Advanced: true,
}},
})
}
// NewCipher constructs a Cipher for the given config name
func NewCipher(name string) (Cipher, error) {
mode, err := NewNameEncryptionMode(config.FileGet(name, "filename_encryption", "standard"))
// newCipherForConfig constructs a Cipher for the given config
func newCipherForConfig(opt *Options) (Cipher, error) {
mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
if err != nil {
return nil, err
}
dirNameEncrypt, err := strconv.ParseBool(config.FileGet(name, "directory_name_encryption", "true"))
if err != nil {
return nil, err
}
password := config.FileGet(name, "password", "")
if password == "" {
if opt.Password == "" {
return nil, errors.New("password not set in config file")
}
password, err = obscure.Reveal(password)
password, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password")
}
salt := config.FileGet(name, "password2", "")
if salt != "" {
salt, err = obscure.Reveal(salt)
var salt string
if opt.Password2 != "" {
salt, err = obscure.Reveal(opt.Password2)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password2")
}
}
cipher, err := newCipher(mode, password, salt, dirNameEncrypt)
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
if err != nil {
return nil, errors.Wrap(err, "failed to make cipher")
}
cipher.setPassCorrupted(opt.PassCorruptedBlocks)
return cipher, nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, rpath string) (fs.Fs, error) {
cipher, err := NewCipher(name)
// NewCipher constructs a Cipher for the given config
func NewCipher(m configmap.Mapper) (Cipher, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
remote := config.FileGet(name, "remote")
return newCipherForConfig(opt)
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
cipher, err := newCipherForConfig(opt)
if err != nil {
return nil, err
}
remote := opt.Remote
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
// Look for a file first
remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
wrappedFs, err := fs.NewFs(remotePath)
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = fs.NewFs(remotePath)
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
}
f := &Fs{
Fs: wrappedFs,
name: name,
root: rpath,
opt: *opt,
cipher: cipher,
}
// the features here are ones we could support, and they are
@@ -145,7 +182,7 @@ func NewFs(name, rpath string) (fs.Fs, error) {
doChangeNotify := wrappedFs.Features().ChangeNotify
if doChangeNotify != nil {
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
decrypted, err := f.DecryptFileName(path)
if err != nil {
@@ -154,21 +191,32 @@ func NewFs(name, rpath string) (fs.Fs, error) {
}
notifyFunc(decrypted, entryType)
}
return doChangeNotify(wrappedNotifyFunc, pollInterval)
doChangeNotify(wrappedNotifyFunc, pollInterval)
}
}
return f, err
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
FilenameEncryption string `config:"filename_encryption"`
DirectoryNameEncryption bool `config:"directory_name_encryption"`
Password string `config:"password"`
Password2 string `config:"password2"`
ShowMapping bool `config:"show_mapping"`
PassCorruptedBlocks bool `config:"pass_corrupted_blocks"`
}
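With the Options struct above, the cipher no longer reads the global config file directly; any configmap.Mapper can supply the values. A hedged sketch of how that plumbing might be driven from code (assuming configmap.Simple as the mapper and an obscured password, as the config file would store it):

package main

import (
	"log"

	"github.com/ncw/rclone/backend/crypt"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/obscure"
)

func main() {
	// passwords are stored obscured, and newCipherForConfig reveals them again
	pw, err := obscure.Obscure("correct horse battery staple")
	if err != nil {
		log.Fatal(err)
	}
	// configstruct.Set inside NewCipher parses these strings into the Options struct
	m := configmap.Simple{
		"filename_encryption":       "standard",
		"directory_name_encryption": "true",
		"password":                  pw,
	}
	cipher, err := crypt.NewCipher(m)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("encrypted name:", cipher.EncryptFileName("hello.txt"))
}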
// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
name string
root string
opt Options
features *fs.Features // optional features
cipher Cipher
mode NameEncryptionMode
}
// Name of the remote (as passed into NewFs)
@@ -199,7 +247,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
return
}
if *cryptShowMapping {
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newObject(obj))
@@ -213,7 +261,7 @@ func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
return
}
if *cryptShowMapping {
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newDir(dir))
@@ -291,14 +339,54 @@ type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.O
// put implements Put or PutStream
func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
// Encrypt the data into wrappedIn
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
return nil, err
}
// Find a hash the destination supports to compute a hash of
// the encrypted data
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
if ht != hash.None {
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, err
}
// unwrap the accounting
var wrap accounting.WrapFn
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
// add the hasher
wrappedIn = io.TeeReader(wrappedIn, hasher)
// wrap the accounting back on
wrappedIn = wrap(wrappedIn)
}
// Transfer the data
o, err := put(wrappedIn, f.newObjectInfo(src), options...)
if err != nil {
return nil, err
}
// Check the hashes of the encrypted data if we were comparing them
if ht != hash.None && hasher != nil {
srcHash := hasher.Sums()[ht]
var dstHash string
dstHash, err = o.Hash(ht)
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
err = o.Remove()
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
}
return f.newObject(o), nil
}
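The reworked put above hashes the encrypted stream while it is being uploaded (tee it into a MultiHasher, re-wrap the accounting) and then compares the sum against what the destination reports, removing the object on a mismatch. A minimal hedged sketch of the tee-and-compare part in isolation, using MD5 and an in-memory stand-in for the destination:

package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
	"log"

	"github.com/ncw/rclone/fs/hash"
)

func main() {
	encrypted := bytes.NewReader([]byte("pretend this is the encrypted stream"))

	// hash the data as it flows towards the destination, without buffering it all
	hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(hash.MD5))
	if err != nil {
		log.Fatal(err)
	}
	var dst bytes.Buffer
	if _, err := io.Copy(&dst, io.TeeReader(encrypted, hasher)); err != nil {
		log.Fatal(err)
	}

	srcHash := hasher.Sums()[hash.MD5]
	dstHash := fmt.Sprintf("%x", md5.Sum(dst.Bytes())) // what a real remote would report back
	if srcHash != dstHash {
		log.Fatalf("corrupted on transfer: md5 crypted hash differ %q vs %q", srcHash, dstHash)
	}
	fmt.Println("hashes match:", srcHash)
}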
@@ -636,24 +724,24 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
// Update in to the object with the modTime given of the given size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
wrappedIn, err := o.f.cipher.EncryptData(in)
if err != nil {
return err
update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return o.Object, o.Object.Update(in, src, options...)
}
return o.Object.Update(wrappedIn, o.f.newObjectInfo(src))
_, err := o.f.put(in, src, options, update)
return err
}
// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(dir fs.Directory) fs.Directory {
new := fs.NewDirCopy(dir)
newDir := fs.NewDirCopy(dir)
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Undecryptable dir name: %v", err)
} else {
new.SetRemote(decryptedRemote)
newDir.SetRemote(decryptedRemote)
}
return new
return newDir
}
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source


@@ -7,13 +7,30 @@ import (
"testing"
"github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive" // for integration tests
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/swift" // for integration tests
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
})
}
// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
@@ -30,6 +47,9 @@ func TestStandard(t *testing.T) {
// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
name := "TestCrypt2"
fstests.Run(t, &fstests.Opt{
@@ -46,6 +66,9 @@ func TestOff(t *testing.T) {
// TestObfuscate runs integration tests against the remote
func TestObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt3"
fstests.Run(t, &fstests.Opt{

File diff suppressed because it is too large


@@ -1,63 +1,82 @@
// +build go1.9
package drive
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"mime"
"path/filepath"
"strings"
"testing"
"google.golang.org/api/drive/v3"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest/fstests"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
)
const exampleExportFormats = `{
"application/vnd.google-apps.document": [
"application/rtf",
"application/vnd.oasis.opendocument.text",
"text/html",
"application/pdf",
"application/epub+zip",
"application/zip",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"text/plain"
],
"application/vnd.google-apps.spreadsheet": [
"application/x-vnd.oasis.opendocument.spreadsheet",
"text/tab-separated-values",
"application/pdf",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"text/csv",
"application/zip",
"application/vnd.oasis.opendocument.spreadsheet"
],
"application/vnd.google-apps.jam": [
"application/pdf"
],
"application/vnd.google-apps.script": [
"application/vnd.google-apps.script+json"
],
"application/vnd.google-apps.presentation": [
"application/vnd.oasis.opendocument.presentation",
"application/pdf",
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
"text/plain"
],
"application/vnd.google-apps.form": [
"application/zip"
],
"application/vnd.google-apps.drawing": [
"image/svg+xml",
"image/png",
"application/pdf",
"image/jpeg"
]
}`
func TestDriveScopes(t *testing.T) {
for _, test := range []struct {
in string
want []string
wantFlag bool
}{
{"", []string{
"https://www.googleapis.com/auth/drive",
}, false},
{" drive.file , drive.readonly", []string{
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.readonly",
}, false},
{" drive.file , drive.appfolder", []string{
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.appfolder",
}, true},
} {
got := driveScopes(test.in)
assert.Equal(t, test.want, got, test.in)
gotFlag := driveScopesContainsAppFolder(got)
assert.Equal(t, test.wantFlag, gotFlag, test.in)
}
}
var exportFormats map[string][]string
/*
var additionalMimeTypes = map[string]string{
"application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",
"application/vnd.ms-excel.template.macroenabled.12": ".xltm",
"application/vnd.ms-powerpoint.presentation.macroenabled.12": ".pptm",
"application/vnd.ms-powerpoint.slideshow.macroenabled.12": ".ppsm",
"application/vnd.ms-powerpoint.template.macroenabled.12": ".potm",
"application/vnd.ms-powerpoint": ".ppt",
"application/vnd.ms-word.document.macroenabled.12": ".docm",
"application/vnd.ms-word.template.macroenabled.12": ".dotm",
"application/vnd.openxmlformats-officedocument.presentationml.template": ".potx",
"application/vnd.openxmlformats-officedocument.spreadsheetml.template": ".xltx",
"application/vnd.openxmlformats-officedocument.wordprocessingml.template": ".dotx",
"application/vnd.sun.xml.writer": ".sxw",
"text/richtext": ".rtf",
}
*/
// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleExportFormats(t *testing.T) {
assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &exportFormats))
func TestInternalLoadExampleFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {})
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"`
}
require.NoError(t, err)
require.NoError(t, json.Unmarshal(buf, &about))
_exportFormats = fixMimeTypeMap(about.ExportFormats)
_importFormats = fixMimeTypeMap(about.ImportFormats)
}
func TestInternalParseExtensions(t *testing.T) {
@@ -66,47 +85,204 @@ func TestInternalParseExtensions(t *testing.T) {
want []string
wantErr error
}{
{"doc", []string{"doc"}, nil},
{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
{"docx,svg,Docx", []string{"docx", "svg"}, nil},
{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
{"doc", []string{".doc"}, nil},
{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
} {
f := new(Fs)
gotErr := f.parseExtensions(test.in)
extensions, _, gotErr := parseExtensions(test.in)
if test.wantErr == nil {
assert.NoError(t, gotErr)
} else {
assert.EqualError(t, gotErr, test.wantErr.Error())
}
assert.Equal(t, test.want, f.extensions)
assert.Equal(t, test.want, extensions)
}
// Test it is appending
f := new(Fs)
assert.Nil(t, f.parseExtensions("docx,svg"))
assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
extensions, _, gotErr := parseExtensions("docx,svg", "docx,svg,xlsx")
assert.NoError(t, gotErr)
assert.Equal(t, []string{".docx", ".svg", ".xlsx"}, extensions)
}
func TestInternalFindExportFormat(t *testing.T) {
item := new(drive.File)
item.MimeType = "application/vnd.google-apps.document"
item := &drive.File{
Name: "file",
MimeType: "application/vnd.google-apps.document",
}
for _, test := range []struct {
extensions []string
wantExtension string
wantMimeType string
}{
{[]string{}, "", ""},
{[]string{"pdf"}, "pdf", "application/pdf"},
{[]string{"pdf", "rtf", "xls"}, "pdf", "application/pdf"},
{[]string{"xls", "rtf", "pdf"}, "rtf", "application/rtf"},
{[]string{"xls", "csv", "svg"}, "", ""},
{[]string{".pdf"}, ".pdf", "application/pdf"},
{[]string{".pdf", ".rtf", ".xls"}, ".pdf", "application/pdf"},
{[]string{".xls", ".rtf", ".pdf"}, ".rtf", "application/rtf"},
{[]string{".xls", ".csv", ".svg"}, "", ""},
} {
f := new(Fs)
f.extensions = test.extensions
gotExtension, gotMimeType := f.findExportFormat("file", exportFormats[item.MimeType])
f.exportExtensions = test.extensions
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
assert.Equal(t, test.wantExtension, gotExtension)
if test.wantExtension != "" {
assert.Equal(t, item.Name+gotExtension, gotFilename)
} else {
assert.Equal(t, "", gotFilename)
}
assert.Equal(t, test.wantMimeType, gotMimeType)
assert.Equal(t, true, gotIsDocument)
}
}
func TestMimeTypesToExtension(t *testing.T) {
for mimeType, extension := range _mimeTypeToExtension {
extensions, err := mime.ExtensionsByType(mimeType)
assert.NoError(t, err)
assert.Contains(t, extensions, extension)
}
}
func TestExtensionToMimeType(t *testing.T) {
for mimeType, extension := range _mimeTypeToExtension {
gotMimeType := mime.TypeByExtension(extension)
mediatype, _, err := mime.ParseMediaType(gotMimeType)
assert.NoError(t, err)
assert.Equal(t, mimeType, mediatype)
}
}
func TestExtensionsForExportFormats(t *testing.T) {
if _exportFormats == nil {
t.Error("exportFormats == nil")
}
for fromMT, toMTs := range _exportFormats {
for _, toMT := range toMTs {
if !isInternalMimeType(toMT) {
extensions, err := mime.ExtensionsByType(toMT)
assert.NoError(t, err, "invalid MIME type %q", toMT)
assert.NotEmpty(t, extensions, "No extension found for %q (from: %q)", fromMT, toMT)
}
}
}
}
func TestExtensionsForImportFormats(t *testing.T) {
t.Skip()
if _importFormats == nil {
t.Error("_importFormats == nil")
}
for fromMT := range _importFormats {
if !isInternalMimeType(fromMT) {
extensions, err := mime.ExtensionsByType(fromMT)
assert.NoError(t, err, "invalid MIME type %q", fromMT)
assert.NotEmpty(t, extensions, "No extension found for %q", fromMT)
}
}
}
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
oldAllow := f.opt.AllowImportNameChange
f.opt.AllowImportNameChange = true
defer func() {
f.opt.AllowImportNameChange = oldAllow
}()
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err)
err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
require.NoError(t, err)
}
func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err)
err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
require.NoError(t, err)
}
func (f *Fs) InternalTestDocumentExport(t *testing.T) {
var buf bytes.Buffer
var err error
f.exportExtensions, _, err = parseExtensions("txt")
require.NoError(t, err)
obj, err := f.NewObject("example2.txt")
require.NoError(t, err)
rc, err := obj.Open()
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
_, err = io.Copy(&buf, rc)
require.NoError(t, err)
text := buf.String()
for _, excerpt := range []string{
"Lorem ipsum dolor sit amet, consectetur",
"porta at ultrices in, consectetur at augue.",
} {
require.Contains(t, text, excerpt)
}
}
func (f *Fs) InternalTestDocumentLink(t *testing.T) {
var buf bytes.Buffer
var err error
f.exportExtensions, _, err = parseExtensions("link.html")
require.NoError(t, err)
obj, err := f.NewObject("example2.link.html")
require.NoError(t, err)
rc, err := obj.Open()
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
_, err = io.Copy(&buf, rc)
require.NoError(t, err)
text := buf.String()
require.True(t, strings.HasPrefix(text, "<html>"))
require.True(t, strings.HasSuffix(text, "</html>\n"))
for _, excerpt := range []string{
`<meta http-equiv="refresh"`,
`Loading <a href="`,
} {
require.Contains(t, text, excerpt)
}
}
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
f.InternalTestDocumentImport(t)
t.Run("DocumentUpdate", func(t *testing.T) {
f.InternalTestDocumentUpdate(t)
t.Run("DocumentExport", func(t *testing.T) {
f.InternalTestDocumentExport(t)
t.Run("DocumentLink", func(t *testing.T) {
f.InternalTestDocumentLink(t)
})
})
})
})
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -1,10 +1,13 @@
// Test Drive filesystem interface
package drive_test
// +build go1.9
package drive
import (
"testing"
"github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -12,6 +15,23 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDrive:",
NilObject: (*drive.Object)(nil),
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
CeilChunkSize: fstests.NextPowerOfTwo,
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)


@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !go1.9
package drive


@@ -0,0 +1,178 @@
{
"importFormats": {
"text/tab-separated-values": [
"application/vnd.google-apps.spreadsheet"
],
"application/x-vnd.oasis.opendocument.presentation": [
"application/vnd.google-apps.presentation"
],
"image/jpeg": [
"application/vnd.google-apps.document"
],
"image/bmp": [
"application/vnd.google-apps.document"
],
"image/gif": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-excel.sheet.macroenabled.12": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.openxmlformats-officedocument.wordprocessingml.template": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-powerpoint.presentation.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-word.template.macroenabled.12": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": [
"application/vnd.google-apps.document"
],
"image/pjpeg": [
"application/vnd.google-apps.document"
],
"application/vnd.google-apps.script+text/plain": [
"application/vnd.google-apps.script"
],
"application/vnd.ms-excel": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.sun.xml.writer": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-word.document.macroenabled.12": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-powerpoint.slideshow.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"text/rtf": [
"application/vnd.google-apps.document"
],
"text/plain": [
"application/vnd.google-apps.document"
],
"application/vnd.oasis.opendocument.spreadsheet": [
"application/vnd.google-apps.spreadsheet"
],
"application/x-vnd.oasis.opendocument.spreadsheet": [
"application/vnd.google-apps.spreadsheet"
],
"image/png": [
"application/vnd.google-apps.document"
],
"application/x-vnd.oasis.opendocument.text": [
"application/vnd.google-apps.document"
],
"application/msword": [
"application/vnd.google-apps.document"
],
"application/pdf": [
"application/vnd.google-apps.document"
],
"application/json": [
"application/vnd.google-apps.script"
],
"application/x-msmetafile": [
"application/vnd.google-apps.drawing"
],
"application/vnd.openxmlformats-officedocument.spreadsheetml.template": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.ms-powerpoint": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-excel.template.macroenabled.12": [
"application/vnd.google-apps.spreadsheet"
],
"image/x-bmp": [
"application/vnd.google-apps.document"
],
"application/rtf": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.presentationml.template": [
"application/vnd.google-apps.presentation"
],
"image/x-png": [
"application/vnd.google-apps.document"
],
"text/html": [
"application/vnd.google-apps.document"
],
"application/vnd.oasis.opendocument.text": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.presentationml.presentation": [
"application/vnd.google-apps.presentation"
],
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.google-apps.script+json": [
"application/vnd.google-apps.script"
],
"application/vnd.openxmlformats-officedocument.presentationml.slideshow": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-powerpoint.template.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"text/csv": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.oasis.opendocument.presentation": [
"application/vnd.google-apps.presentation"
],
"image/jpg": [
"application/vnd.google-apps.document"
],
"text/richtext": [
"application/vnd.google-apps.document"
]
},
"exportFormats": {
"application/vnd.google-apps.document": [
"application/rtf",
"application/vnd.oasis.opendocument.text",
"text/html",
"application/pdf",
"application/epub+zip",
"application/zip",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"text/plain"
],
"application/vnd.google-apps.spreadsheet": [
"application/x-vnd.oasis.opendocument.spreadsheet",
"text/tab-separated-values",
"application/pdf",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"text/csv",
"application/zip",
"application/vnd.oasis.opendocument.spreadsheet"
],
"application/vnd.google-apps.jam": [
"application/pdf"
],
"application/vnd.google-apps.script": [
"application/vnd.google-apps.script+json"
],
"application/vnd.google-apps.presentation": [
"application/vnd.oasis.opendocument.presentation",
"application/pdf",
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
"text/plain"
],
"application/vnd.google-apps.form": [
"application/zip"
],
"application/vnd.google-apps.drawing": [
"image/svg+xml",
"image/png",
"application/pdf",
"image/jpeg"
]
}
}

Binary file not shown.

Binary file not shown.

Binary file not shown.


@@ -8,6 +8,8 @@
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
// +build go1.9
package drive
import (
@@ -30,9 +32,6 @@ import (
const (
// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
statusResumeIncomplete = 308
// Number of times to try each chunk
maxTries = 10
)
// resumableUpload is used by the generated APIs to provide resumable uploads.
@@ -53,14 +52,18 @@ type resumableUpload struct {
}
// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
params := make(url.Values)
params.Set("alt", "json")
params.Set("uploadType", "resumable")
params.Set("fields", partialFields)
func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
params := url.Values{
"alt": {"json"},
"uploadType": {"resumable"},
"fields": {partialFields},
}
if f.isTeamDrive {
params.Set("supportsTeamDrives", "true")
}
if f.opt.KeepRevisionForever {
params.Set("keepRevisionForever", "true")
}
urls := "https://www.googleapis.com/upload/drive/v3/files"
method := "POST"
if fileID != "" {
@@ -192,16 +195,16 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
}
// Upload uploads the chunks from the input
// It retries each chunk maxTries times (with a pause of uploadPause between attempts).
// It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0)
var StatusCode int
var err error
buf := make([]byte, int(chunkSize))
buf := make([]byte, int(rx.f.opt.ChunkSize))
for start < rx.ContentLength {
reqSize := rx.ContentLength - start
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
}
chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
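The loop above sizes every chunk from rx.f.opt.ChunkSize and truncates only the final one to the bytes that remain. A standalone sketch of that arithmetic, with made-up chunk size and content length:

package main

import "fmt"

func main() {
	const chunkSize = int64(8 * 1024 * 1024) // assumed value; the backend chunk size is configurable
	contentLength := int64(20*1024*1024 + 123)
	for start := int64(0); start < contentLength; start += chunkSize {
		reqSize := contentLength - start
		if reqSize >= chunkSize {
			reqSize = chunkSize
		}
		fmt.Printf("chunk at offset %d, %d bytes\n", start, reqSize)
	}
}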


@@ -31,12 +31,16 @@ import (
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
@@ -54,24 +58,6 @@ const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
var (
// Description of how to auth for this app
dropboxConfig = &oauth2.Config{
Scopes: []string{},
// Endpoint: oauth2.Endpoint{
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
// },
Endpoint: dropbox.OAuthEndpoint(""),
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
// A regexp matching path names for files Dropbox ignores
// See https://www.dropbox.com/en/help/145 - Ignored files
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
// Upload chunk size - setting too small makes uploads slow.
// Chunks are buffered into memory for retries.
//
@@ -95,8 +81,26 @@ var (
// Choose 48MB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48MB = 192MB
// by default.
uploadChunkSize = fs.SizeSuffix(48 * 1024 * 1024)
maxUploadChunkSize = fs.SizeSuffix(150 * 1024 * 1024)
defaultChunkSize = 48 * fs.MebiByte
maxChunkSize = 150 * fs.MebiByte
)
var (
// Description of how to auth for this app
dropboxConfig = &oauth2.Config{
Scopes: []string{},
// Endpoint: oauth2.Endpoint{
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
// },
Endpoint: dropbox.OAuthEndpoint(""),
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
// A regexp matching path names for files Dropbox ignores
// See https://www.dropbox.com/en/help/145 - Ignored files
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
)
// Register with Fs
@@ -105,34 +109,59 @@ func init() {
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.ConfigNoOffline("dropbox", name, dropboxConfig)
Config: func(name string, m configmap.Mapper) {
err := oauthutil.ConfigNoOffline("dropbox", name, m, dropboxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Dropbox App Client Id - leave blank normally.",
Help: "Dropbox App Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Dropbox App Client Secret - leave blank normally.",
Help: "Dropbox App Client Secret\nLeave blank normally.",
}, {
Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size. (< %v).
Any files larger than this will be uploaded in chunks of this size.
Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, fs.SizeSuffix(maxChunkSize)),
Default: fs.SizeSuffix(defaultChunkSize),
Advanced: true,
}, {
Name: "impersonate",
Help: "Impersonate this user when using a business account.",
Default: "",
Advanced: true,
}},
})
flags.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
}
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
}
// Fs represents a remote dropbox server
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv files.Client // the connection to the dropbox server
sharingClient sharing.Client // as above, but for generating sharing links
sharing sharing.Client // as above, but for generating sharing links
users users.Client // as above, but for accessing user information
team team.Client // for the Teams API
slashRoot string // root with "/" prefix, lowercase
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *pacer.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
}
// Object describes a dropbox object
@@ -175,23 +204,59 @@ func shouldRetry(err error) (bool, error) {
return false, err
}
baseErrString := errors.Cause(err).Error()
// FIXME there is probably a better way of doing this!
// handle any official Retry-After header from Dropbox's SDK first
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
}
return true, err
}
// Keep old behaviour for backward compatibility
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
return true, err
}
return fserrors.ShouldRetry(err), err
}
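The call-site pattern that pairs shouldRetry with the pacer (used throughout this backend) looks roughly like the sketch below; doSomeDropboxCall is a hypothetical stand-in for an SDK call, not a real function:

err = f.pacer.Call(func() (bool, error) {
	err := doSomeDropboxCall() // hypothetical stand-in for an f.srv call
	return shouldRetry(err)
})

The pacer re-runs the closure, up to --low-level-retries times, whenever shouldRetry reports the error as retryable, which after this change includes sleeping for any Retry-After interval Dropbox sends before the next attempt.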
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.Byte
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
if uploadChunkSize > maxUploadChunkSize {
return nil, errors.Errorf("chunk size too big, must be < %v", maxUploadChunkSize)
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "dropbox: chunk size")
}
// Convert the old token if it exists. The old token was
// just a string, the new one is a JSON blob
oldToken := strings.TrimSpace(config.FileGet(name, config.ConfigToken))
if oldToken != "" && oldToken[0] != '{' {
oldToken, ok := m.Get(config.ConfigToken)
oldToken = strings.TrimSpace(oldToken)
if ok && oldToken != "" && oldToken[0] != '{' {
fs.Infof(name, "Converting token to new format")
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
@@ -200,26 +265,47 @@ func NewFs(name, root string) (fs.Fs, error) {
}
}
oAuthClient, _, err := oauthutil.NewClient(name, dropboxConfig)
oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig)
if err != nil {
log.Fatalf("Failed to configure dropbox: %v", err)
return nil, errors.Wrap(err, "failed to configure dropbox")
}
config := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe???
}
srv := files.New(config)
sharingClient := sharing.New(config)
users := users.New(config)
f := &Fs{
name: name,
srv: srv,
sharingClient: sharingClient,
users: users,
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
name: name,
opt: *opt,
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
config := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe???
HeaderGenerator: f.headerGenerator,
}
// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
f.team = team.New(config)
if opt.Impersonate != "" {
user := team.UserSelectorArg{
Email: opt.Impersonate,
}
user.Tag = "email"
members := []*team.UserSelectorArg{&user}
args := team.NewMembersGetInfoArgs(members)
memberIds, err := f.team.MembersGetInfo(args)
if err != nil {
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
}
config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
}
f.srv = files.New(config)
f.sharing = sharing.New(config)
f.users = users.New(config)
f.features = (&fs.Features{
CaseInsensitive: true,
ReadMimeType: true,
@@ -227,6 +313,27 @@ func NewFs(name, root string) (fs.Fs, error) {
}).Fill(f)
f.setRoot(root)
// If root starts with / then use the actual root
if strings.HasPrefix(root, "/") {
var acc *users.FullAccount
err = f.pacer.Call(func() (bool, error) {
acc, err = f.users.GetCurrentAccount()
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "get current account failed")
}
switch x := acc.RootInfo.(type) {
case *common.TeamRootInfo:
f.ns = x.RootNamespaceId
case *common.UserRootInfo:
f.ns = x.RootNamespaceId
default:
return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
}
fs.Debugf(f, "Using root namespace %q", f.ns)
}
// See if the root is actually an object
_, err = f.getFileMetadata(f.slashRoot)
if err == nil {
@@ -241,6 +348,16 @@ func NewFs(name, root string) (fs.Fs, error) {
return f, nil
}
// headerGenerator for dropbox sdk
func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string {
if f.ns == "" {
return map[string]string{}
}
return map[string]string{
"Dropbox-API-Path-Root": `{".tag": "namespace_id", "namespace_id": "` + f.ns + `"}`,
}
}
// Sets root in f
func (f *Fs) setRoot(root string) {
f.root = strings.Trim(root, "/")
@@ -419,21 +536,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
return entries, nil
}
// A read closer which doesn't close the input
type readCloser struct {
in io.Reader
}
// Read bytes from the object - see io.Reader
func (rc *readCloser) Read(p []byte) (n int, err error) {
return rc.in.Read(p)
}
// Dummy close function
func (rc *readCloser) Close() error {
return nil
}
// Put the object
//
// Copy the reader in to the new object which is returned
@@ -651,7 +753,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
}
var linkRes sharing.IsSharedLinkMetadata
err = f.pacer.Call(func() (bool, error) {
linkRes, err = f.sharingClient.CreateSharedLinkWithSettings(&createArg)
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
return shouldRetry(err)
})
@@ -663,7 +765,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
}
var listRes *sharing.ListSharedLinksResult
err = f.pacer.Call(func() (bool, error) {
listRes, err = f.sharingClient.ListSharedLinks(&listArg)
listRes, err = f.sharing.ListSharedLinks(&listArg)
return shouldRetry(err)
})
if err != nil {
@@ -896,7 +998,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
chunkSize := int64(uploadChunkSize)
chunkSize := int64(o.fs.opt.ChunkSize)
chunks := 0
if size != -1 {
chunks = int(size/chunkSize) + 1
@@ -1011,7 +1113,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
size := src.Size()
var err error
var entry *files.FileMetadata
if size > int64(uploadChunkSize) || size == -1 {
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
entry, err = o.uploadChunked(in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {


@@ -1,10 +1,10 @@
// Test Dropbox filesystem interface
package dropbox_test
package dropbox
import (
"testing"
"github.com/ncw/rclone/backend/dropbox"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -12,6 +12,15 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDropbox:",
NilObject: (*dropbox.Object)(nil),
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)


@@ -4,16 +4,15 @@ package ftp
import (
"io"
"net/textproto"
"net/url"
"os"
"path"
"strings"
"sync"
"time"
"github.com/jlaffaye/ftp"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
@@ -30,33 +29,40 @@ func init() {
{
Name: "host",
Help: "FTP host to connect to",
Optional: false,
Required: true,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
Optional: true,
Name: "user",
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21) ",
Optional: true,
Name: "port",
Help: "FTP port, leave blank to use default (21)",
}, {
Name: "pass",
Help: "FTP password",
IsPassword: true,
Optional: false,
Required: true,
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
}
// Fs represents a remote FTP server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
url string
user string
@@ -161,51 +167,33 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (ff fs.Fs, err error) {
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
// FIXME Convert the old scheme used for the first beta - remove after release
if ftpURL := config.FileGet(name, "url"); ftpURL != "" {
fs.Infof(name, "Converting old configuration")
u, err := url.Parse(ftpURL)
if err != nil {
return nil, errors.Wrapf(err, "Failed to parse old url %q", ftpURL)
}
parts := strings.Split(u.Host, ":")
config.FileSet(name, "host", parts[0])
if len(parts) > 1 {
config.FileSet(name, "port", parts[1])
}
config.FileSet(name, "host", u.Host)
config.FileSet(name, "user", config.FileGet(name, "username"))
config.FileSet(name, "pass", config.FileGet(name, "password"))
config.FileDeleteKey(name, "username")
config.FileDeleteKey(name, "password")
config.FileDeleteKey(name, "url")
config.SaveConfig()
if u.Path != "" && u.Path != "/" {
fs.Errorf(name, "Path %q in FTP URL no longer supported - put it on the end of the remote %s:%s", u.Path, name, u.Path)
}
// Parse config into Options struct
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
host := config.FileGet(name, "host")
user := config.FileGet(name, "user")
pass := config.FileGet(name, "pass")
port := config.FileGet(name, "port")
pass, err = obscure.Reveal(pass)
pass, err := obscure.Reveal(opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "NewFS decrypt password")
}
user := opt.User
if user == "" {
user = os.Getenv("USER")
}
port := opt.Port
if port == "" {
port = "21"
}
dialAddr := host + ":" + port
dialAddr := opt.Host + ":" + port
u := "ftp://" + path.Join(dialAddr+"/", root)
f := &Fs{
name: name,
root: root,
opt: *opt,
url: u,
user: user,
pass: pass,
@@ -480,6 +468,8 @@ func (f *Fs) mkdir(abspath string) error {
switch errX.Code {
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil
case 521: // dir already exists: error number according to RFC 959: issue #2363
err = nil
}
}
return err
@@ -656,7 +646,21 @@ func (f *ftpReadCloser) Read(p []byte) (n int, err error) {
// Close the FTP reader and return the connection to the pool
func (f *ftpReadCloser) Close() error {
err := f.rc.Close()
var err error
errchan := make(chan error, 1)
go func() {
errchan <- f.rc.Close()
}()
// Wait for Close for up to 60 seconds
timer := time.NewTimer(60 * time.Second)
select {
case err = <-errchan:
timer.Stop()
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f.f, "Timeout when waiting for connection Close")
return nil
}
// if errors while reading or closing, dump the connection
if err != nil || f.err != nil {
_ = f.c.Quit()
@@ -714,6 +718,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
path := path.Join(o.fs.root, o.remote)
// remove the file if upload failed
remove := func() {
// Give the FTP server a chance to get its internal state in order after the error.
// The error may have been local in which case we closed the connection. The server
// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
// able to think of a better method to find out if the server has finished - ncw
time.Sleep(1 * time.Second)
removeErr := o.Remove()
if removeErr != nil {
fs.Debugf(o, "Failed to remove: %v", removeErr)
@@ -727,7 +736,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
err = c.Stor(path, in)
if err != nil {
_ = c.Quit()
_ = c.Quit() // toss this connection to avoid sync errors
remove()
return errors.Wrap(err, "update stor")
}
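The timed Close above can be expressed as a small standalone helper; this is a sketch of the same pattern, assuming only the standard io and time packages:

// closeWithTimeout runs Close in a goroutine and gives up after the timeout,
// so a dead connection cannot block the caller indefinitely.
func closeWithTimeout(c io.Closer, timeout time.Duration) error {
	errchan := make(chan error, 1)
	go func() {
		errchan <- c.Close()
	}()
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case err := <-errchan:
		return err
	case <-timer.C:
		// Timer fired: assume the connection is dead and report no error,
		// matching the behaviour of ftpReadCloser.Close above.
		return nil
	}
}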


@@ -1,4 +1,7 @@
// Package googlecloudstorage provides an interface to Google Cloud Storage
// +build go1.9
package googlecloudstorage
/*
@@ -29,12 +32,15 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
@@ -49,11 +55,10 @@ const (
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 1000 // chunk size to read directory listings
minSleep = 10 * time.Millisecond
)
var (
gcsLocation = flags.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-northeast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
gcsStorageClass = flags.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
// Description of how to auth for this app
storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageFullControlScope},
@@ -68,29 +73,36 @@ var (
func init() {
fs.Register(&fs.RegInfo{
Name: "google cloud storage",
Prefix: "gcs",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(name string) {
if config.FileGet(name, "service_account_file") != "" {
Config: func(name string, m configmap.Mapper) {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
if saFile != "" || saCreds != "" {
return
}
err := oauthutil.Config("google cloud storage", name, storageConfig)
err := oauthutil.Config("google cloud storage", name, m, storageConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id - leave blank normally.",
Help: "Google Application Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret - leave blank normally.",
Help: "Google Application Client Secret\nLeave blank normally.",
}, {
Name: "project_number",
Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login.",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
}, {
Name: "object_acl",
Help: "Access Control List for new objects.",
@@ -204,21 +216,29 @@ func init() {
})
}
// Options defines the configuration for this backend
type Options struct {
ProjectNumber string `config:"project_number"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
}
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
projectNumber string // used for finding buckets
objectACL string // used when creating new objects
bucketACL string // used when creating new buckets
location string // location of new buckets
storageClass string // storage class of new buckets
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
pacer *pacer.Pacer // To pace the API calls
}
// Object describes a storage object
@@ -262,6 +282,30 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// shouldRetry determines whether a given err rates being retried
func shouldRetry(err error) (again bool, errOut error) {
again = false
if err != nil {
if fserrors.ShouldRetry(err) {
again = true
} else {
switch gerr := err.(type) {
case *googleapi.Error:
if gerr.Code >= 500 && gerr.Code < 600 {
// All 5xx errors should be retried
again = true
} else if len(gerr.Errors) > 0 {
reason := gerr.Errors[0].Reason
if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
again = true
}
}
}
}
}
return again, err
}
// Pattern to match a storage path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
@@ -287,27 +331,37 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
var oAuthClient *http.Client
var err error
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.ObjectACL == "" {
opt.ObjectACL = "private"
}
if opt.BucketACL == "" {
opt.BucketACL = "private"
}
// try loading service account credentials from env variable, then from a file
serviceAccountCreds := []byte(config.FileGet(name, "service_account_credentials"))
serviceAccountPath := config.FileGet(name, "service_account_file")
if len(serviceAccountCreds) == 0 && serviceAccountPath != "" {
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(serviceAccountPath))
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
}
serviceAccountCreds = loadedCreds
opt.ServiceAccountCredentials = string(loadedCreds)
}
if len(serviceAccountCreds) > 0 {
oAuthClient, err = getServiceAccountClient(serviceAccountCreds)
if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
}
} else {
oAuthClient, _, err = oauthutil.NewClient(name, storageConfig)
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
}
@@ -319,32 +373,17 @@ func NewFs(name, root string) (fs.Fs, error) {
}
f := &Fs{
name: name,
bucket: bucket,
root: directory,
projectNumber: config.FileGet(name, "project_number"),
objectACL: config.FileGet(name, "object_acl"),
bucketACL: config.FileGet(name, "bucket_acl"),
location: config.FileGet(name, "location"),
storageClass: config.FileGet(name, "storage_class"),
name: name,
bucket: bucket,
root: directory,
opt: *opt,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
if f.objectACL == "" {
f.objectACL = "private"
}
if f.bucketACL == "" {
f.bucketACL = "private"
}
if *gcsLocation != "" {
f.location = *gcsLocation
}
if *gcsStorageClass != "" {
f.storageClass = *gcsStorageClass
}
// Create a new authorized Drive client.
f.client = oAuthClient
@@ -356,7 +395,10 @@ func NewFs(name, root string) (fs.Fs, error) {
if f.root != "" {
f.root += "/"
// Check to see if the object exists
_, err = f.svc.Objects.Get(bucket, directory).Do()
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(bucket, directory).Do()
return shouldRetry(err)
})
if err == nil {
f.root = path.Dir(directory)
if f.root == "." {
@@ -404,7 +446,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(dir string, recurse bool, fn listFn) error {
func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
root := f.root
rootLength := len(root)
if dir != "" {
@@ -415,7 +457,11 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
list = list.Delimiter("/")
}
for {
objects, err := list.Do()
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
objects, err = list.Do()
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code == http.StatusNotFound {
@@ -444,7 +490,7 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
remote := object.Name[rootLength:]
// is this a directory marker?
if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
if recurse {
if recurse && remote != "" {
// add a directory in if --fast-list since will have no prefixes
err = fn(remote[:len(remote)-1], object, true)
if err != nil {
@@ -514,12 +560,16 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
if f.projectNumber == "" {
if f.opt.ProjectNumber == "" {
return nil, errors.New("can't list buckets without project number")
}
listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
for {
buckets, err := listBuckets.Do()
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
@@ -607,7 +657,7 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
func (f *Fs) Mkdir(dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
@@ -615,7 +665,11 @@ func (f *Fs) Mkdir(dir string) error {
}
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
_, err := f.svc.Objects.List(f.bucket).MaxResults(1).Do()
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
f.bucketOK = true
@@ -628,16 +682,19 @@ func (f *Fs) Mkdir(dir string) error {
return errors.Wrap(err, "failed to get bucket")
}
if f.projectNumber == "" {
if f.opt.ProjectNumber == "" {
return errors.New("can't make bucket without project number")
}
bucket := storage.Bucket{
Name: f.bucket,
Location: f.location,
StorageClass: f.storageClass,
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
}
_, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketACL).Do()
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = true
}
@@ -648,13 +705,16 @@ func (f *Fs) Mkdir(dir string) error {
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(dir string) error {
func (f *Fs) Rmdir(dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
err := f.svc.Buckets.Delete(f.bucket).Do()
err = f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(f.bucket).Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = false
}
@@ -696,7 +756,11 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObject := srcObj.fs.root + srcObj.remote
dstBucket := f.bucket
dstObject := f.root + remote
newObject, err := f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) {
newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
@@ -784,7 +848,11 @@ func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
object, err := o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
var object *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code == http.StatusNotFound {
@@ -818,14 +886,18 @@ func metadataFromModTime(modTime time.Time) map[string]string {
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
func (o *Object) SetModTime(modTime time.Time) (err error) {
// This only adds metadata so will preserve other metadata
object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
Metadata: metadataFromModTime(modTime),
}
newObject, err := o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
var newObject *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
return shouldRetry(err)
})
if err != nil {
return err
}
@@ -845,7 +917,17 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
return nil, err
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
res, err := o.fs.client.Do(req)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.client.Do(req)
if err == nil {
err = googleapi.CheckResponse(res)
if err != nil {
_ = res.Body.Close() // ignore error
}
}
return shouldRetry(err)
})
if err != nil {
return nil, err
}
@@ -874,7 +956,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
Updated: modTime.Format(timeFormatOut), // Doesn't get set
Metadata: metadataFromModTime(modTime),
}
newObject, err := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectACL).Do()
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
return shouldRetry(err)
})
if err != nil {
return err
}
@@ -884,8 +970,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
// Remove an object
func (o *Object) Remove() error {
return o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
func (o *Object) Remove() (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
})
return err
}
// MimeType of an Object if known, "" otherwise


@@ -1,4 +1,7 @@
// Test GoogleCloudStorage filesystem interface
// +build go1.9
package googlecloudstorage_test
import (


@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !go1.9
package googlecloudstorage


@@ -14,7 +14,8 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
@@ -35,7 +36,7 @@ func init() {
Options: []fs.Option{{
Name: "url",
Help: "URL of http host to connect to",
Optional: false,
Required: true,
Examples: []fs.OptionExample{{
Value: "https://example.com",
Help: "Connect to example.com",
@@ -45,11 +46,17 @@ func init() {
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
}
// Fs stores the interface to the remote HTTP files
type Fs struct {
name string
root string
features *fs.Features // optional features
opt Options // options for this backend
endpoint *url.URL
endpointURL string // endpoint as a string
httpClient *http.Client
@@ -78,14 +85,20 @@ func statusError(res *http.Response, err error) error {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string) (fs.Fs, error) {
endpoint := config.FileGet(name, "url")
if !strings.HasSuffix(endpoint, "/") {
endpoint += "/"
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
}
// Parse the endpoint and stick the root onto it
base, err := url.Parse(endpoint)
base, err := url.Parse(opt.Endpoint)
if err != nil {
return nil, err
}
@@ -130,6 +143,7 @@ func NewFs(name, root string) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,
opt: *opt,
httpClient: client,
endpoint: u,
endpointURL: u.String(),
@@ -179,7 +193,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
}
err := o.stat()
if err != nil {
return nil, errors.Wrap(err, "Stat failed")
return nil, err
}
return o, nil
}
@@ -402,6 +416,9 @@ func (o *Object) url() string {
func (o *Object) stat() error {
url := o.url()
res, err := o.fs.httpClient.Head(url)
if err == nil && res.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
err = statusError(res, err)
if err != nil {
return errors.Wrap(err, "failed to stat")


@@ -16,6 +16,7 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/lib/rest"
"github.com/stretchr/testify/assert"
@@ -29,7 +30,7 @@ var (
)
// prepareServer starts the test server and returns a config map and a function to tidy it up afterwards
func prepareServer(t *testing.T) func() {
func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
@@ -41,19 +42,24 @@ func prepareServer(t *testing.T) func() {
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true
config.FileSet(remoteName, "type", "http")
config.FileSet(remoteName, "url", ts.URL)
// config.FileSet(remoteName, "type", "http")
// config.FileSet(remoteName, "url", ts.URL)
m := configmap.Simple{
"type": "http",
"url": ts.URL,
}
// return a function to tidy up
return ts.Close
return m, ts.Close
}
// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
tidy := prepareServer(t)
m, tidy := prepareServer(t)
// Instantiate it
f, err := NewFs(remoteName, "")
f, err := NewFs(remoteName, "", m)
require.NoError(t, err)
return f, tidy
@@ -138,6 +144,11 @@ func TestNewObject(t *testing.T) {
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
// check object not found
o, err = f.NewObject("not found.txt")
assert.Nil(t, o)
assert.Equal(t, fs.ErrorObjectNotFound, err)
}
func TestOpen(t *testing.T) {
@@ -151,6 +162,7 @@ func TestOpen(t *testing.T) {
fd, err := o.Open()
require.NoError(t, err)
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "beetroot\n", string(data))
@@ -158,6 +170,7 @@ func TestOpen(t *testing.T) {
fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
@@ -175,20 +188,20 @@ func TestMimeType(t *testing.T) {
}
func TestIsAFileRoot(t *testing.T) {
tidy := prepareServer(t)
m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(remoteName, "one%.txt")
f, err := NewFs(remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
testListRoot(t, f)
}
func TestIsAFileSubDir(t *testing.T) {
tidy := prepareServer(t)
m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(remoteName, "three/underthree.txt")
f, err := NewFs(remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
entries, err := f.List("")


@@ -2,7 +2,9 @@ package hubic
import (
"net/http"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/swift"
)
@@ -21,12 +23,17 @@ func newAuth(f *Fs) *auth {
// Request constructs a http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(*swift.Connection) (*http.Request, error) {
err := a.f.getCredentials()
if err != nil {
return nil, err
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials()
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
}
return nil, nil
return nil, err
}
// Response parses the result of an http request
@@ -36,7 +43,7 @@ func (a *auth) Response(resp *http.Response) error {
// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string {
func (a *auth) StorageUrl(Internal bool) string { // nolint
return a.f.credentials.Endpoint
}
@@ -46,7 +53,7 @@ func (a *auth) Token() string {
}
// The CDN url if available
func (a *auth) CdnUrl() string {
func (a *auth) CdnUrl() string { // nolint
return ""
}


@@ -16,6 +16,8 @@ import (
"github.com/ncw/rclone/backend/swift"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/lib/oauthutil"
@@ -52,19 +54,19 @@ func init() {
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.Config("hubic", name, oauthConfig)
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("hubic", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Options: append([]fs.Option{{
Name: config.ConfigClientID,
Help: "Hubic Client Id - leave blank normally.",
Help: "Hubic Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Hubic Client Secret - leave blank normally.",
}},
Help: "Hubic Client Secret\nLeave blank normally.",
}}, swift.SharedOptions...),
})
}
@@ -145,8 +147,8 @@ func (f *Fs) getCredentials() (err error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(name, oauthConfig)
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Hubic")
}
@@ -167,8 +169,15 @@ func NewFs(name, root string) (fs.Fs, error) {
return nil, errors.Wrap(err, "error authenticating swift connection")
}
// Parse config into swift.Options struct
opt := new(swift.Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(name, root, c, true)
swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}


@@ -0,0 +1,316 @@
package api
import (
"encoding/xml"
"fmt"
"time"
"github.com/pkg/errors"
)
const (
// default time format for almost all requests and responses
timeFormat = "2006-01-02-T15:04:05Z0700"
// the API server seems to use a different format
apiTimeFormat = "2006-01-02T15:04:05Z07:00"
)
// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
type Time time.Time
// UnmarshalXML turns XML into a Time
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var v string
if err := d.DecodeElement(&v, &start); err != nil {
return err
}
if v == "" {
*t = Time(time.Time{})
return nil
}
newTime, err := time.Parse(timeFormat, v)
if err == nil {
*t = Time(newTime)
}
return err
}
// MarshalXML turns a Time into XML
func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(t.String(), start)
}
// Return Time string in Jottacloud format
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
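The layout above differs from RFC 3339 only by the extra "-" before the "T", so timestamps like the ones in the XML samples below parse directly with time.Parse. A standalone sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	const jottaLayout = "2006-01-02-T15:04:05Z0700" // same layout as timeFormat above
	t, err := time.Parse(jottaLayout, "2018-07-18-T21:39:10Z")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.UTC())               // 2018-07-18 21:39:10 +0000 UTC
	fmt.Println(t.Format(jottaLayout)) // round-trips to the original string
}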
// Flag is a hacky type for checking if an attribute is present
type Flag bool
// UnmarshalXMLAttr sets Flag to true if the attribute is present
func (f *Flag) UnmarshalXMLAttr(attr xml.Attr) error {
*f = true
return nil
}
// MarshalXMLAttr : Do not use
func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
attr := xml.Attr{
Name: name,
Value: "false",
}
return attr, errors.New("unimplemented")
}
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
}
/*
GET http://www.jottacloud.com/JFS/<account>
<user time="2018-07-18-T21:39:10Z" host="dn-132">
<username>12qh1wsht8cssxdtwl15rqh9</username>
<account-type>free</account-type>
<locked>false</locked>
<capacity>5368709120</capacity>
<max-devices>-1</max-devices>
<max-mobile-devices>-1</max-mobile-devices>
<usage>0</usage>
<read-locked>false</read-locked>
<write-locked>false</write-locked>
<quota-write-locked>false</quota-write-locked>
<enable-sync>true</enable-sync>
<enable-foldershare>true</enable-foldershare>
<devices>
<device>
<name xml:space="preserve">Jotta</name>
<display_name xml:space="preserve">Jotta</display_name>
<type>JOTTA</type>
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
</device>
</devices>
</user>
*/
// AccountInfo represents a Jottacloud account
type AccountInfo struct {
Username string `xml:"username"`
AccountType string `xml:"account-type"`
Locked bool `xml:"locked"`
Capacity int64 `xml:"capacity"`
MaxDevices int `xml:"max-devices"`
MaxMobileDevices int `xml:"max-mobile-devices"`
Usage int64 `xml:"usage"`
ReadLocked bool `xml:"read-locked"`
WriteLocked bool `xml:"write-locked"`
QuotaWriteLocked bool `xml:"quota-write-locked"`
EnableSync bool `xml:"enable-sync"`
EnableFolderShare bool `xml:"enable-foldershare"`
Devices []JottaDevice `xml:"devices>device"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>
<device time="2018-07-23-T20:21:50Z" host="dn-158">
<name xml:space="preserve">Jotta</name>
<display_name xml:space="preserve">Jotta</display_name>
<type>JOTTA</type>
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
<user>12qh1wsht8cssxdtwl15rqh9</user>
<mountPoints>
<mountPoint>
<name xml:space="preserve">Archive</name>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Shared</name>
<size>0</size>
<modified></modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Sync</name>
<size>0</size>
<modified></modified>
</mountPoint>
</mountPoints>
<metadata first="" max="" total="3" num_mountpoints="3"/>
</device>
*/
// JottaDevice represents a Jottacloud Device
type JottaDevice struct {
Name string `xml:"name"`
DisplayName string `xml:"display_name"`
Type string `xml:"type"`
Sid string `xml:"sid"`
Size int64 `xml:"size"`
User string `xml:"user"`
MountPoints []JottaMountPoint `xml:"mountPoints>mountPoint"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>
<mountPoint time="2018-07-24-T20:35:02Z" host="dn-157">
<name xml:space="preserve">Sync</name>
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</path>
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</abspath>
<size>0</size>
<modified></modified>
<device>Jotta</device>
<user>12qh1wsht8cssxdtwl15rqh9</user>
<folders>
<folder name="test"/>
</folders>
<metadata first="" max="" total="1" num_folders="1" num_files="0"/>
</mountPoint>
*/
// JottaMountPoint represents a Jottacloud mountpoint
type JottaMountPoint struct {
Name string `xml:"name"`
Size int64 `xml:"size"`
Device string `xml:"device"`
Folders []JottaFolder `xml:"folders>folder"`
Files []JottaFile `xml:"files>file"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/<folder>
<folder name="test" time="2018-07-24-T20:41:37Z" host="dn-158">
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</path>
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</abspath>
<folders>
<folder name="t2"/>c
</folders>
<files>
<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
<currentRevision>
<number>1</number>
<state>COMPLETED</state>
<created>2018-07-05-T15:08:02Z</created>
<modified>2018-07-05-T15:08:02Z</modified>
<mime>application/octet-stream</mime>
<size>30827730</size>
<md5>1e8a7b728ab678048df00075c9507158</md5>
<updated>2018-07-24-T20:41:10Z</updated>
</currentRevision>
</file>
</files>
<metadata first="" max="" total="2" num_folders="1" num_files="1"/>
</folder>
*/
// JottaFolder represents a JottacloudFolder
type JottaFolder struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
Path string `xml:"path"`
CreatedAt Time `xml:"created"`
ModifiedAt Time `xml:"modified"`
Updated Time `xml:"updated"`
Folders []JottaFolder `xml:"folders>folder"`
Files []JottaFile `xml:"files>file"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
<currentRevision>
<number>1</number>
<state>COMPLETED</state>
<created>2018-07-05-T15:08:02Z</created>
<modified>2018-07-05-T15:08:02Z</modified>
<mime>application/octet-stream</mime>
<size>30827730</size>
<md5>1e8a7b728ab678048df00075c9507158</md5>
<updated>2018-07-24-T20:41:10Z</updated>
</currentRevision>
</file>
*/
// JottaFile represents a Jottacloud file
type JottaFile struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
PublicSharePath string `xml:"publicSharePath"`
State string `xml:"currentRevision>state"`
CreatedAt Time `xml:"currentRevision>created"`
ModifiedAt Time `xml:"currentRevision>modified"`
Updated Time `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"`
}
// Error is a custom Error for wrapping Jottacloud error responses
type Error struct {
StatusCode int `xml:"code"`
Message string `xml:"message"`
Reason string `xml:"reason"`
Cause string `xml:"cause"`
}
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("error %d", e.StatusCode)
if e.Message != "" {
out += ": " + e.Message
}
if e.Reason != "" {
out += fmt.Sprintf(" (%+v)", e.Reason)
}
return out
}
// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
Bytes int64 `json:"bytes"`
Created string `json:"created"`
Md5 string `json:"md5"`
Modified string `json:"modified"`
Path string `json:"path"`
}
// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
Name string `json:"name"`
Path string `json:"path"`
State string `json:"state"`
UploadID string `json:"upload_id"`
UploadURL string `json:"upload_url"`
Bytes int64 `json:"bytes"`
ResumePos int64 `json:"resume_pos"`
}
// UploadResponse after an upload
type UploadResponse struct {
Name string `json:"name"`
Path string `json:"path"`
Kind string `json:"kind"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Created int64 `json:"created"`
Modified int64 `json:"modified"`
Deleted interface{} `json:"deleted"`
Mime string `json:"mime"`
}
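To show the wire format the allocate call expects, here is a standalone sketch that marshals an AllocateFileRequest; the struct is copied from above and the field values are made up for the example:

package main

import (
	"encoding/json"
	"fmt"
)

type AllocateFileRequest struct {
	Bytes    int64  `json:"bytes"`
	Created  string `json:"created"`
	Md5      string `json:"md5"`
	Modified string `json:"modified"`
	Path     string `json:"path"`
}

func main() {
	req := AllocateFileRequest{
		Bytes:    30827730,
		Created:  "2018-07-05T15:08:02Z",
		Modified: "2018-07-05T15:08:02Z",
		Md5:      "1e8a7b728ab678048df00075c9507158",
		Path:     "/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync/block.csv",
	}
	out, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(out))
}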


@@ -0,0 +1,29 @@
package api
import (
"encoding/xml"
"testing"
"time"
)
func TestMountpointEmptyModificationTime(t *testing.T) {
mountpoint := `
<mountPoint time="2018-08-12-T09:58:24Z" host="dn-157">
<name xml:space="preserve">Sync</name>
<path xml:space="preserve">/foo/Jotta</path>
<abspath xml:space="preserve">/foo/Jotta</abspath>
<size>0</size>
<modified></modified>
<device>Jotta</device>
<user>foo</user>
<metadata first="" max="" total="0" num_folders="0" num_files="0"/>
</mountPoint>
`
var jf JottaFolder
if err := xml.Unmarshal([]byte(mountpoint), &jf); err != nil {
t.Fatal(err)
}
if !time.Time(jf.ModifiedAt).IsZero() {
t.Errorf("got non-zero time, want zero")
}
}

File diff suppressed because it is too large.


@@ -0,0 +1,42 @@
package jottacloud
import (
"crypto/md5"
"fmt"
"io"
"testing"
"github.com/ncw/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestReadMD5(t *testing.T) {
// Check readMD5 for different size and threshold
for _, size := range []int64{0, 1024, 10 * 1024, 100 * 1024} {
t.Run(fmt.Sprintf("%d", size), func(t *testing.T) {
hasher := md5.New()
n, err := io.Copy(hasher, readers.NewPatternReader(size))
require.NoError(t, err)
assert.Equal(t, n, size)
wantMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
for _, threshold := range []int64{512, 1024, 10 * 1024, 20 * 1024} {
t.Run(fmt.Sprintf("%d", threshold), func(t *testing.T) {
in := readers.NewPatternReader(size)
gotMD5, out, cleanup, err := readMD5(in, size, threshold)
defer cleanup()
require.NoError(t, err)
assert.Equal(t, wantMD5, gotMD5)
// check md5hash of out
hasher := md5.New()
n, err := io.Copy(hasher, out)
require.NoError(t, err)
assert.Equal(t, n, size)
outMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
assert.Equal(t, wantMD5, outMD5)
})
}
})
}
}


@@ -0,0 +1,17 @@
// Test Jottacloud filesystem interface
package jottacloud_test
import (
"testing"
"github.com/ncw/rclone/backend/jottacloud"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestJottacloud:",
NilObject: (*jottacloud.Object)(nil),
})
}


@@ -0,0 +1,77 @@
/*
Translate file names for JottaCloud adapted from OneDrive
The following characters are JottaCloud reserved characters, and can't
be used in JottaCloud folder and file names.
jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
*/
package jottacloud
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// Onedrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'*': '＊', // FULLWIDTH ASTERISK
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'?': '？', // FULLWIDTH QUESTION MARK
':': '：', // FULLWIDTH COLON
';': '；', // FULLWIDTH SEMICOLON
'|': '｜', // FULLWIDTH VERTICAL LINE
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Filenames can't start with space
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Filenames can't end with space
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}


@@ -0,0 +1,28 @@
package jottacloud
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:;|"`, `＼＊＜＞？：；｜＂`},
{`\*<>?:;|"\*<>?:;|"`, `＼＊＜＞？：；｜＂＼＊＜＞？：；｜＂`},
{" leading space", "␠leading space"},
{"trailing space ", "trailing space␠"},
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
{"trailing space /trailing space /trailing space ", "trailing space␠/trailing space␠/trailing space␠"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}

backend/local/lchtimes.go Normal file

@@ -0,0 +1,20 @@
// +build windows plan9
package local
import (
"time"
)
const haveLChtimes = false
// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
// Does nothing
return nil
}


@@ -0,0 +1,28 @@
// +build !windows,!plan9
package local
import (
"os"
"time"
"golang.org/x/sys/unix"
)
const haveLChtimes = true
// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
var utimes [2]unix.Timespec
utimes[0] = unix.NsecToTimespec(atime.UnixNano())
utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
if e := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); e != nil {
return &os.PathError{Op: "lchtimes", Path: name, Err: e}
}
return nil
}
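
A brief usage sketch for lChtimes follows; it is a hypothetical fragment assumed to sit alongside the definitions above in the local package, and the path and times are made up for illustration. Callers can check haveLChtimes to skip verification on the Windows/plan9 build, where the function above is a no-op.

// Hypothetical fragment in the local package; names are illustrative.
func exampleLChtimes() error {
    if !haveLChtimes {
        return nil // Windows/plan9 build: setting link times is a no-op
    }
    if err := os.Symlink("target.txt", "example.link"); err != nil {
        return err
    }
    when := time.Date(2002, 2, 3, 4, 5, 10, 0, time.UTC)
    // stamps the link itself rather than target.txt
    return lChtimes("example.link", when, when)
}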


@@ -2,6 +2,7 @@
package local
import (
"bytes"
"fmt"
"io"
"io/ioutil"
@@ -16,23 +17,19 @@ import (
"unicode/utf8"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/file"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"google.golang.org/appengine/log"
)
var (
followSymlinks = flags.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
skipSymlinks = flags.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
noUTFNorm = flags.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
noCheckUpdated = flags.BoolP("local-no-check-updated", "", false, "Don't check to see if the files change during upload")
)
// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
// Register with Fs
func init() {
@@ -41,29 +38,90 @@ func init() {
Description: "Local Disk",
NewFs: NewFs,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Optional: true,
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names",
}},
}, {
Name: "copy_links",
Help: "Follow symlinks and copy the pointed to item.",
Default: false,
NoPrefix: true,
ShortOpt: "L",
Advanced: true,
}, {
Name: "links",
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
Default: false,
NoPrefix: true,
ShortOpt: "l",
Advanced: true,
}, {
Name: "skip_links",
Help: `Don't warn about skipped symlinks.
This flag disables warning messages on skipped symlinks or junction
points, as you explicitly acknowledge that they should be skipped.`,
Default: false,
NoPrefix: true,
Advanced: true,
}, {
Name: "no_unicode_normalization",
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
This flag is deprecated now. Rclone no longer normalizes unicode file
names, but it compares them with unicode normalization in the sync
routine instead.`,
Default: false,
Advanced: true,
}, {
Name: "no_check_updated",
Help: `Don't check to see if the files change during upload
Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (eg
[Glusterfs #2206](https://github.com/ncw/rclone/issues/2206)) so this
check can be disabled with this flag.`,
Default: false,
Advanced: true,
}, {
Name: "one_file_system",
Help: "Don't cross filesystem boundaries (unix/macOS only).",
Default: false,
NoPrefix: true,
ShortOpt: "x",
Advanced: true,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
}
// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
wmu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
nounc bool // Skip UNC conversion on Windows
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
dirNames *mapper // directory name mapping
@@ -72,30 +130,40 @@ type Fs struct {
// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path - properly UTF-8 encoded - for rclone
path string // The local path - may not be properly UTF-8 encoded - for OS
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
fs *Fs // The Fs this object is part of
remote string // The remote path - properly UTF-8 encoded - for rclone
path string // The local path - may not be properly UTF-8 encoded - for OS
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
translatedLink bool // Is this object a translated link
}
// ------------------------------------------------------------
// NewFs constructs an Fs from the path
func NewFs(name, root string) (fs.Fs, error) {
var err error
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
if *noUTFNorm {
log.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
// NewFs constructs an Fs from the path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.TranslateSymlinks && opt.FollowSymlinks {
return nil, errLinksAndCopyLinks
}
if opt.NoUTFNorm {
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
}
nounc := config.FileGet(name, "nounc")
f := &Fs{
name: name,
opt: *opt,
warned: make(map[string]struct{}),
nounc: nounc == "true",
dev: devUnset,
lstat: os.Lstat,
dirNames: newMapper(),
@@ -105,24 +173,38 @@ func NewFs(name, root string) (fs.Fs, error) {
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
}).Fill(f)
if *followSymlinks {
if opt.FollowSymlinks {
f.lstat = os.Stat
}
// Check to see if this points to a file
fi, err := f.lstat(f.root)
if err == nil {
f.dev = readDevice(fi)
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
if err == nil && fi.Mode().IsRegular() {
if err == nil && f.isRegular(fi.Mode()) {
// It is a file, so use the parent as the root
f.root, _ = getDirFile(f.root)
f.root = filepath.Dir(f.root)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// Determine whether a file is a 'regular' file.
// Symlinks count as regular files only if the TranslateSymlinks
// option is in effect.
func (f *Fs) isRegular(mode os.FileMode) bool {
if !f.opt.TranslateSymlinks {
return mode.IsRegular()
}
// fi.Mode().IsRegular() tests that all mode type bits are zero.
// Since symlinks are accepted here, test that all type bits other
// than the symlink bit are zero.
return mode&os.ModeType&^os.ModeSymlink == 0
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
@@ -153,18 +235,38 @@ func (f *Fs) caseInsensitive() bool {
return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
}
// translateLink checks whether the remote is a translated link
// and returns a new path, removing the suffix as needed.
// It also returns whether this is a translated link at all.
//
// For regular files, dstPath is returned unchanged.
func translateLink(remote, dstPath string) (newDstPath string, isTranslatedLink bool) {
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
newDstPath = strings.TrimSuffix(dstPath, linkSuffix)
return newDstPath, isTranslatedLink
}
// newObject makes a half completed Object
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
translatedLink := false
if dstPath == "" {
dstPath = f.cleanPath(filepath.Join(f.root, remote))
}
remote = f.cleanRemote(remote)
if f.opt.TranslateSymlinks {
// Possibly receive a new name for dstPath
dstPath, translatedLink = translateLink(remote, dstPath)
}
return &Object{
fs: f,
remote: remote,
path: dstPath,
fs: f,
remote: remote,
path: dstPath,
translatedLink: translatedLink,
}
}
@@ -186,6 +288,11 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj
}
return nil, err
}
// Handle the odd case that a symlink was specified by name without the link suffix
if o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink {
return nil, fs.ErrorObjectNotFound
}
}
if o.mode.IsDir() {
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
@@ -209,6 +316,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
dir = f.dirNames.Load(dir)
fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
remote := f.cleanRemote(dir)
@@ -243,8 +351,15 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
newRemote := path.Join(remote, name)
newPath := filepath.Join(fsDirPath, name)
// Follow symlinks if required
if *followSymlinks && (mode&os.ModeSymlink) != 0 {
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
fi, err = os.Stat(newPath)
if os.IsNotExist(err) {
// Skip bad symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
fs.Errorf(newRemote, "Listing error: %v", err)
accounting.Stats.Error(err)
continue
}
if err != nil {
return nil, err
}
@@ -253,11 +368,15 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi) {
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
entries = append(entries, d)
}
} else {
// Check whether this link should be translated
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += linkSuffix
}
fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
if err != nil {
return nil, err
@@ -357,7 +476,7 @@ func (f *Fs) Mkdir(dir string) error {
if err != nil {
return err
}
f.dev = readDevice(fi)
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
return nil
}
@@ -471,7 +590,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// OK
} else if err != nil {
return nil, err
} else if !dstObj.mode.IsRegular() {
} else if !dstObj.fs.isRegular(dstObj.mode) {
// It isn't a file
return nil, errors.New("can't move file onto non-file")
}
@@ -530,7 +649,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}
// Create parent of destination
dstParentPath, _ := getDirFile(dstPath)
dstParentPath := filepath.Dir(dstPath)
err = os.MkdirAll(dstParentPath, 0777)
if err != nil {
return err
@@ -593,8 +712,13 @@ func (o *Object) Hash(r hash.Type) (string, error) {
o.fs.objectHashesMu.Unlock()
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
hashes = make(map[hash.Type]string)
in, err := os.Open(o.path)
var in io.ReadCloser
if !o.translatedLink {
in, err = file.Open(o.path)
} else {
in, err = o.openTranslatedLink(0, -1)
}
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
}
@@ -625,7 +749,12 @@ func (o *Object) ModTime() time.Time {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
err := os.Chtimes(o.path, modTime, modTime)
var err error
if o.translatedLink {
err = lChtimes(o.path, modTime, modTime)
} else {
err = os.Chtimes(o.path, modTime, modTime)
}
if err != nil {
return err
}
@@ -643,13 +772,8 @@ func (o *Object) Storable() bool {
}
}
mode := o.mode
// On windows a file with os.ModeSymlink represents a file with reparse points
if runtime.GOOS == "windows" && (mode&os.ModeSymlink) != 0 {
fs.Debugf(o, "Clearing symlink bit to allow a file with reparse points to be copied")
mode &^= os.ModeSymlink
}
if mode&os.ModeSymlink != 0 {
if !*skipSymlinks {
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
if !o.fs.opt.SkipSymlinks {
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
}
return false
@@ -674,7 +798,7 @@ type localOpenFile struct {
// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
if !*noCheckUpdated {
if !file.o.fs.opt.NoCheckUpdated {
// Check if file has the same size and modTime
fi, err := file.fd.Stat()
if err != nil {
@@ -709,6 +833,16 @@ func (file *localOpenFile) Close() (err error) {
return err
}
// openTranslatedLink returns a ReadCloser containing the contents of a symbolic link
func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err error) {
// Read the link and return its destination as the contents of the object
linkdst, err := os.Readlink(o.path)
if err != nil {
return nil, err
}
return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
@@ -728,7 +862,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
}
}
fd, err := os.Open(o.path)
// Handle a translated link
if o.translatedLink {
return o.openTranslatedLink(offset, limit)
}
fd, err := file.Open(o.path)
if err != nil {
return
}
@@ -755,12 +894,23 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
dir, _ := getDirFile(o.path)
dir := filepath.Dir(o.path)
return os.MkdirAll(dir, 0777)
}
type nopWriterCloser struct {
*bytes.Buffer
}
func (nwc nopWriterCloser) Close() error {
// noop
return nil
}
// Update the object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
var out io.WriteCloser
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
@@ -774,9 +924,23 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return err
}
out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
var symlinkData bytes.Buffer
// If the object is a regular file, create it.
// If it is a translated link, just read in the contents, and
// then create a symlink
if !o.translatedLink {
f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
// Pre-allocate the file for performance reasons
err = preAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
out = f
} else {
out = nopWriterCloser{&symlinkData}
}
// Calculate the hash of the object we are reading as we go along
@@ -791,6 +955,26 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
if err == nil {
err = closeErr
}
if o.translatedLink {
if err == nil {
// Remove any current symlink or file, if one exists
if _, err := os.Lstat(o.path); err == nil {
if removeErr := os.Remove(o.path); removeErr != nil {
fs.Errorf(o, "Failed to remove previous file: %v", removeErr)
return removeErr
}
}
// Use the contents of the copied object to create a symlink
err = os.Symlink(symlinkData.String(), o.path)
}
// only continue if symlink creation succeeded
if err != nil {
return err
}
}
if err != nil {
fs.Logf(o, "Removing partially written file on error: %v", err)
if removeErr := os.Remove(o.path); removeErr != nil {
@@ -843,17 +1027,6 @@ func (o *Object) Remove() error {
return remove(o.path)
}
// Return the directory and file from an OS path. Assumes
// os.PathSeparator is used.
func getDirFile(s string) (string, string) {
i := strings.LastIndex(s, string(os.PathSeparator))
dir, file := s[:i], s[i+1:]
if dir == "" {
dir = string(os.PathSeparator)
}
return dir, file
}
// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {
@@ -884,7 +1057,7 @@ func (f *Fs) cleanPath(s string) string {
s = s2
}
}
if !f.nounc {
if !f.opt.NoUNC {
// Convert to UNC
s = uncPath(s)
}
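
The hunks above implement the -l/--links behaviour: a symlink is listed as a regular file named with the .rclonelink suffix whose contents are the link target, and Update recreates the symlink from those contents. A small standalone sketch of that round trip follows; it is not the backend code, and the file names are illustrative.

package main

import (
    "fmt"
    "os"
    "strings"
)

const linkSuffix = ".rclonelink"

// exportSymlink turns a symlink into the (name, contents) pair a remote
// would store for it under the -l translation scheme.
func exportSymlink(path string) (name string, data []byte, err error) {
    dst, err := os.Readlink(path)
    if err != nil {
        return "", nil, err
    }
    return path + linkSuffix, []byte(dst), nil
}

// importSymlink does the reverse: strip the suffix, replace whatever is
// at the path, and recreate the symlink from the stored contents.
func importSymlink(name string, data []byte) error {
    path := strings.TrimSuffix(name, linkSuffix)
    _ = os.Remove(path) // ignore error if nothing is there yet
    return os.Symlink(string(data), path)
}

func main() {
    _ = os.Symlink("file.txt", "demo")
    name, data, err := exportSymlink("demo")
    if err != nil {
        panic(err)
    }
    fmt.Println(name, string(data)) // demo.rclonelink file.txt
    if err := importSymlink(name, data); err != nil {
        panic(err)
    }
}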


@@ -1,13 +1,19 @@
package local
import (
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/lib/file"
"github.com/ncw/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -38,15 +44,20 @@ func TestUpdatingCheck(t *testing.T) {
filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now())
fd, err := os.Open(path.Join(r.LocalName, filePath))
fd, err := file.Open(path.Join(r.LocalName, filePath))
if err != nil {
t.Fatalf("failed opening file %q: %v", filePath, err)
}
defer func() {
require.NoError(t, fd.Close())
}()
fi, err := fd.Stat()
o := &Object{size: fi.Size(), modTime: fi.ModTime()}
require.NoError(t, err)
o := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}}
wrappedFd := readers.NewLimitedReadCloser(fd, -1)
hash, err := hash.NewMultiHasherTypes(hash.Supported)
require.NoError(t, err)
in := localOpenFile{
o: o,
in: wrappedFd,
@@ -63,14 +74,115 @@ func TestUpdatingCheck(t *testing.T) {
require.Errorf(t, err, "can't copy - source file is being updated")
// turn the checking off and try again
*noCheckUpdated = true
defer func() {
*noCheckUpdated = false
}()
in.o.fs.opt.NoCheckUpdated = true
r.WriteFile(filePath, "content updated", time.Now())
_, err = in.Read(buf)
require.NoError(t, err)
}
func TestSymlink(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
f := r.Flocal.(*Fs)
dir := f.root
// Write a file
modTime1 := fstest.Time("2001-02-03T04:05:10.123123123Z")
file1 := r.WriteFile("file.txt", "hello", modTime1)
// Write a symlink
modTime2 := fstest.Time("2002-02-03T04:05:10.123123123Z")
symlinkPath := filepath.Join(dir, "symlink.txt")
require.NoError(t, os.Symlink("file.txt", symlinkPath))
require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))
// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
if runtime.GOOS == "windows" {
file2.Size = 0 // symlinks are 0 length under Windows
}
// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
// Check with no symlink flags
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote)
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
fstest.CheckItems(t, r.Flocal, file1, file2d)
fstest.CheckItems(t, r.Fremote)
// Set fs into "-l" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2)
}
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
if runtime.GOOS == "windows" {
file3.Size = 0 // symlinks are 0 length under Windows
}
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
}
// Check it got the correct contents
symlinkPath = filepath.Join(dir, "symlink2.txt")
fi, err := os.Lstat(symlinkPath)
require.NoError(t, err)
assert.False(t, fi.Mode().IsRegular())
linkText, err := os.Readlink(symlinkPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkText)
// Check that NewObject gets the correct object
o, err := r.Flocal.NewObject("symlink2.txt" + linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
if runtime.GOOS != "windows" {
assert.Equal(t, int64(8), o.Size())
}
// Check that NewObject doesn't see the non suffixed version
_, err = r.Flocal.NewObject("symlink2.txt")
require.Equal(t, fs.ErrorObjectNotFound, err)
// Check reading the object
in, err := o.Open()
require.NoError(t, err)
contents, err := ioutil.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt", string(contents))
require.NoError(t, in.Close())
// Check reading the object with range
in, err = o.Open(&fs.RangeOption{Start: 2, End: 5})
require.NoError(t, err)
contents, err = ioutil.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt"[2:5+1], string(contents))
require.NoError(t, in.Close())
}
func TestSymlinkError(t *testing.T) {
m := configmap.Simple{
"links": "true",
"copy_links": "true",
}
_, err := NewFs("local", "/", m)
assert.Equal(t, errLinksAndCopyLinks, err)
}
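
TestSymlinkError above also shows the configuration style introduced in this change set: backend options arrive through a configmap rather than package-level flags. The fragment below is a hypothetical sketch, assumed to live in the local package alongside the test, of constructing the backend with -l semantics; the path is illustrative.

// Hypothetical fragment in the style of the test above.
func exampleLinksFs() error {
    m := configmap.Simple{
        "links":      "true", // translate symlinks to .rclonelink files
        "skip_links": "true", // don't warn about symlinks that are skipped
    }
    f, err := NewFs("local", "/tmp/data", m)
    if err != nil {
        return err
    }
    entries, err := f.List("")
    if err != nil {
        return err
    }
    for _, entry := range entries {
        fmt.Println(entry.Remote())
    }
    return nil
}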


@@ -0,0 +1,10 @@
//+build !windows,!linux
package local
import "os"
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
return nil
}


@@ -0,0 +1,22 @@
//+build linux
package local
import (
"os"
"golang.org/x/sys/unix"
)
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
if size <= 0 {
return nil
}
err := unix.Fallocate(int(out.Fd()), unix.FALLOC_FL_KEEP_SIZE, 0, size)
// FIXME could be doing something here
// if err == unix.ENOSPC {
// log.Printf("No space")
// }
return err
}


@@ -0,0 +1,79 @@
//+build windows
package local
import (
"os"
"syscall"
"unsafe"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
)
var (
ntdll = windows.NewLazySystemDLL("ntdll.dll")
ntQueryVolumeInformationFile = ntdll.NewProc("NtQueryVolumeInformationFile")
ntSetInformationFile = ntdll.NewProc("NtSetInformationFile")
)
type fileAllocationInformation struct {
AllocationSize uint64
}
type fileFsSizeInformation struct {
TotalAllocationUnits uint64
AvailableAllocationUnits uint64
SectorsPerAllocationUnit uint32
BytesPerSector uint32
}
type ioStatusBlock struct {
Status, Information uintptr
}
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
if size <= 0 {
return nil
}
var (
iosb ioStatusBlock
fsSizeInfo fileFsSizeInformation
allocInfo fileAllocationInformation
)
// Query info about the block sizes on the file system
_, _, e1 := ntQueryVolumeInformationFile.Call(
uintptr(out.Fd()),
uintptr(unsafe.Pointer(&iosb)),
uintptr(unsafe.Pointer(&fsSizeInfo)),
uintptr(unsafe.Sizeof(fsSizeInfo)),
uintptr(3), // FileFsSizeInformation
)
if e1 != nil && e1 != syscall.Errno(0) {
return errors.Wrap(e1, "preAllocate NtQueryVolumeInformationFile failed")
}
// Calculate the allocation size
clusterSize := uint64(fsSizeInfo.BytesPerSector) * uint64(fsSizeInfo.SectorsPerAllocationUnit)
if clusterSize <= 0 {
return errors.Errorf("preAllocate clusterSize %d <= 0", clusterSize)
}
allocInfo.AllocationSize = (1 + uint64(size-1)/clusterSize) * clusterSize
// Ask for the allocation
_, _, e1 = ntSetInformationFile.Call(
uintptr(out.Fd()),
uintptr(unsafe.Pointer(&iosb)),
uintptr(unsafe.Pointer(&allocInfo)),
uintptr(unsafe.Sizeof(allocInfo)),
uintptr(19), // FileAllocationInformation
)
if e1 != nil && e1 != syscall.Errno(0) {
return errors.Wrap(e1, "preAllocate NtSetInformationFile failed")
}
return nil
}
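
The allocation request above is rounded up to a whole number of clusters. A tiny standalone illustration of that arithmetic follows; the 4096-byte cluster size is just an example value.

package main

import "fmt"

// roundToClusters mirrors (1 + (size-1)/clusterSize) * clusterSize from
// the code above; callers must ensure size > 0, as preAllocate does.
func roundToClusters(size, clusterSize uint64) uint64 {
    return (1 + (size-1)/clusterSize) * clusterSize
}

func main() {
    // a 10,000 byte file on 4096-byte clusters is reserved as 3 clusters
    fmt.Println(roundToClusters(10000, 4096)) // 12288
}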


@@ -8,6 +8,6 @@ import "os"
// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo) uint64 {
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
return devUnset
}


@@ -9,17 +9,12 @@ import (
"syscall"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/flags"
)
var (
oneFileSystem = flags.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
)
// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo) uint64 {
if !*oneFileSystem {
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
if !oneFileSystem {
return devUnset
}
statT, ok := fi.Sys().(*syscall.Stat_t)


@@ -19,13 +19,13 @@ import (
"fmt"
"io"
"path"
"regexp"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
@@ -38,12 +38,11 @@ import (
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
useTrash = true // FIXME make configurable - rclone global
eventWaitTime = 500 * time.Millisecond
decayConstant = 2 // bigger for slower decay, exponential
)
var (
megaDebug = flags.BoolP("mega-debug", "", false, "If set then output more debug from mega.")
megaCacheMu sync.Mutex // mutex for the below
megaCache = map[string]*mega.Mega{} // cache logged in Mega's by user
)
@@ -57,20 +56,46 @@ func init() {
Options: []fs.Option{{
Name: "user",
Help: "User name",
Optional: true,
Required: true,
}, {
Name: "pass",
Help: "Password.",
Optional: true,
Required: true,
IsPassword: true,
}, {
Name: "debug",
Help: `Output more debug from Mega.
If this flag is set (along with -vv) it will print further debugging
information from the mega backend.`,
Default: false,
Advanced: true,
}, {
Name: "hard_delete",
Help: `Delete files permanently rather than putting them into the trash.
Normally the mega backend will put all deletions into the trash rather
than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`,
Default: false,
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
User string `config:"user"`
Pass string `config:"pass"`
Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"`
}
// Fs represents a remote mega
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed config options
features *fs.Features // optional features
srv *mega.Mega // the connection to the server
pacer *pacer.Pacer // pacer for API calls
@@ -114,9 +139,6 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// Pattern to match a mega path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses a mega 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
@@ -147,12 +169,16 @@ func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
user := fs.ConfigFileGet(name, "user")
pass := fs.ConfigFileGet(name, "pass")
if pass != "" {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.Pass != "" {
var err error
pass, err = obscure.Reveal(pass)
opt.Pass, err = obscure.Reveal(opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "couldn't decrypt password")
}
@@ -165,30 +191,31 @@ func NewFs(name, root string) (fs.Fs, error) {
// them up between different remotes.
megaCacheMu.Lock()
defer megaCacheMu.Unlock()
srv := megaCache[user]
srv := megaCache[opt.User]
if srv == nil {
srv = mega.New().SetClient(fshttp.NewClient(fs.Config))
srv.SetRetries(fs.Config.LowLevelRetries) // let mega do the low level retries
srv.SetLogger(func(format string, v ...interface{}) {
fs.Infof("*go-mega*", format, v...)
})
if *megaDebug {
if opt.Debug {
srv.SetDebugger(func(format string, v ...interface{}) {
fs.Debugf("*go-mega*", format, v...)
})
}
err := srv.Login(user, pass)
err := srv.Login(opt.User, opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "couldn't login")
}
megaCache[user] = srv
megaCache[opt.User] = srv
}
root = parsePath(root)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: srv,
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
@@ -198,7 +225,7 @@ func NewFs(name, root string) (fs.Fs, error) {
}).Fill(f)
// Find the root node and check if it is a file or not
_, err := f.findRoot(false)
_, err = f.findRoot(false)
switch err {
case nil:
// root node found and is a directory
@@ -542,7 +569,7 @@ func (f *Fs) Mkdir(dir string) error {
// deleteNode removes a file or directory, observing useTrash
func (f *Fs) deleteNode(node *mega.Node) (err error) {
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Delete(node, !useTrash)
err = f.srv.Delete(node, f.opt.HardDelete)
return shouldRetry(err)
})
return err
@@ -573,6 +600,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
}
}
waitEvent := f.srv.WaitEventsStart()
err = f.deleteNode(dirNode)
if err != nil {
return errors.Wrap(err, "delete directory node failed")
@@ -582,7 +611,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
if dirNode == rootNode {
f.clearRoot()
}
time.Sleep(100 * time.Millisecond) // FIXME give the callback a chance
f.srv.WaitEvents(waitEvent, eventWaitTime)
return nil
}
@@ -656,6 +686,8 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
}
}
waitEvent := f.srv.WaitEventsStart()
// rename the object if required
if srcLeaf != dstLeaf {
//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
@@ -668,7 +700,8 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
}
}
time.Sleep(100 * time.Millisecond) // FIXME give events a chance...
f.srv.WaitEvents(waitEvent, eventWaitTime)
return nil
}
@@ -1114,6 +1147,11 @@ func (o *Object) Remove() error {
return nil
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.info.GetHash()
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1126,4 +1164,5 @@ var (
_ fs.MergeDirser = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
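
Two of the hunks above swap a fixed 100ms sleep for go-mega's event-wait API. The following is only a hedged sketch of that pattern, assumed to sit inside the mega backend next to the code it mirrors, using the WaitEventsStart/WaitEvents calls and eventWaitTime constant shown above.

// Sketch only: capture an event cursor before the mutation, make the
// change, then block for up to eventWaitTime until mega has processed
// the resulting events, instead of sleeping for a fixed interval.
func (f *Fs) deleteAndWait(node *mega.Node) error {
    waitEvent := f.srv.WaitEventsStart()
    if err := f.deleteNode(node); err != nil {
        return err
    }
    f.srv.WaitEvents(waitEvent, eventWaitTime)
    return nil
}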


@@ -2,10 +2,16 @@
package api
import "time"
import (
"strings"
"time"
)
const (
timeFormat = `"` + time.RFC3339 + `"`
// PackageTypeOneNote is the package type value for OneNote files
PackageTypeOneNote = "oneNote"
)
// Error is returned from one drive when things go wrong
@@ -88,9 +94,27 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
// ItemReference groups data needed to reference a OneDrive item
// across the service into a single structure.
type ItemReference struct {
DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
ID string `json:"id"` // Unique identifier for the item. Read/Write.
Path string `json:"path"` // Path that used to navigate to the item. Read/Write.
DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
ID string `json:"id"` // Unique identifier for the item. Read/Write.
Path string `json:"path"` // Path that used to navigate to the item. Read/Write.
DriveType string `json:"driveType"` // Type of the drive, Read-Only
}
// RemoteItemFacet groups data needed to reference a OneDrive remote item
type RemoteItemFacet struct {
ID string `json:"id"` // The unique identifier of the item within the remote Drive. Read-only.
Name string `json:"name"` // The name of the item (filename and extension). Read-write.
CreatedBy IdentitySet `json:"createdBy"` // Identity of the user, device, and application which created the item. Read-only.
LastModifiedBy IdentitySet `json:"lastModifiedBy"` // Identity of the user, device, and application which last modified the item. Read-only.
CreatedDateTime Timestamp `json:"createdDateTime"` // Date and time of item creation. Read-only.
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write.
Size int64 `json:"size"` // Size of the item in bytes. Read-only.
WebURL string `json:"webUrl"` // URL that displays the resource in the browser. Read-only.
}
// FolderFacet groups folder-related data on OneDrive into a single structure
@@ -127,6 +151,13 @@ type FileSystemInfoFacet struct {
type DeletedFacet struct {
}
// PackageFacet indicates that a DriveItem is the top level item
// in a "package" or a collection of items that should be treated as a collection instead of individual items.
// `oneNote` is the only currently defined value.
type PackageFacet struct {
Type string `json:"type"`
}
// Item represents metadata for an item in OneDrive
type Item struct {
ID string `json:"id"` // The unique identifier of the item within the Drive. Read-only.
@@ -143,12 +174,14 @@ type Item struct {
Description string `json:"description"` // Provide a user-visible description of the item. Read-write.
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
RemoteItem *RemoteItemFacet `json:"remoteItem"` // Remote Item metadata, if the item is a remote shared item. Read-only.
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
// Image *ImageFacet `json:"image"` // Image metadata, if the item is an image. Read-only.
// Photo *PhotoFacet `json:"photo"` // Photo metadata, if the item is a photo. Read-only.
// Audio *AudioFacet `json:"audio"` // Audio metadata, if the item is an audio file. Read-only.
// Video *VideoFacet `json:"video"` // Video metadata, if the item is a video. Read-only.
// Location *LocationFacet `json:"location"` // Location metadata, if the item has location data. Read-only.
Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
}
@@ -217,6 +250,28 @@ type MoveItemRequest struct {
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo,omitempty"` // File system information on client. Read-write.
}
//CreateShareLinkRequest is the request to create a sharing link
//Always Type:view and Scope:anonymous for public sharing
type CreateShareLinkRequest struct {
Type string `json:"type"` //Link type in View, Edit or Embed
Scope string `json:"scope,omitempty"` //Optional. Scope in anonymous, organization
}
//CreateShareLinkResponse is the response from CreateShareLinkRequest
type CreateShareLinkResponse struct {
ID string `json:"id"`
Roles []string `json:"roles"`
Link struct {
Type string `json:"type"`
Scope string `json:"scope"`
WebURL string `json:"webUrl"`
Application struct {
ID string `json:"id"`
DisplayName string `json:"displayName"`
} `json:"application"`
} `json:"link"`
}
// AsyncOperationStatus provides information on the status of an asynchronous job's progress.
//
// The following API calls return AsyncOperationStatus resources:
@@ -224,7 +279,134 @@ type MoveItemRequest struct {
// Copy Item
// Upload From URL
type AsyncOperationStatus struct {
Operation string `json:"operation"` // The type of job being run.
PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
}
// GetID returns a normalized ID of the item
// If the DriveID is known it will be prefixed to the ID with a # separator
// Can be parsed using onedrive.parseNormalizedID(normalizedID)
func (i *Item) GetID() string {
if i.IsRemote() && i.RemoteItem.ID != "" {
return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID
} else if i.ParentReference != nil && strings.Index(i.ID, "#") == -1 {
return i.ParentReference.DriveID + "#" + i.ID
}
return i.ID
}
// GetDriveID returns the DriveID of the item's normalized parent reference
func (i *Item) GetDriveID() string {
return i.GetParentReferance().DriveID
}
// GetName returns a normalized Name of the item
func (i *Item) GetName() string {
if i.IsRemote() && i.RemoteItem.Name != "" {
return i.RemoteItem.Name
}
return i.Name
}
// GetFolder returns a normalized Folder of the item
func (i *Item) GetFolder() *FolderFacet {
if i.IsRemote() && i.RemoteItem.Folder != nil {
return i.RemoteItem.Folder
}
return i.Folder
}
// GetPackage returns a normalized Package of the item
func (i *Item) GetPackage() *PackageFacet {
if i.IsRemote() && i.RemoteItem.Package != nil {
return i.RemoteItem.Package
}
return i.Package
}
// GetPackageType returns the package type of the item if available,
// otherwise ""
func (i *Item) GetPackageType() string {
pack := i.GetPackage()
if pack == nil {
return ""
}
return pack.Type
}
// GetFile returns a normalized File of the item
func (i *Item) GetFile() *FileFacet {
if i.IsRemote() && i.RemoteItem.File != nil {
return i.RemoteItem.File
}
return i.File
}
// GetFileSystemInfo returns a normalized FileSystemInfo of the item
func (i *Item) GetFileSystemInfo() *FileSystemInfoFacet {
if i.IsRemote() && i.RemoteItem.FileSystemInfo != nil {
return i.RemoteItem.FileSystemInfo
}
return i.FileSystemInfo
}
// GetSize returns a normalized Size of the item
func (i *Item) GetSize() int64 {
if i.IsRemote() && i.RemoteItem.Size != 0 {
return i.RemoteItem.Size
}
return i.Size
}
// GetWebURL returns a normalized WebURL of the item
func (i *Item) GetWebURL() string {
if i.IsRemote() && i.RemoteItem.WebURL != "" {
return i.RemoteItem.WebURL
}
return i.WebURL
}
// GetCreatedBy returns a normalized CreatedBy of the item
func (i *Item) GetCreatedBy() IdentitySet {
if i.IsRemote() && i.RemoteItem.CreatedBy != (IdentitySet{}) {
return i.RemoteItem.CreatedBy
}
return i.CreatedBy
}
// GetLastModifiedBy returns a normalized LastModifiedBy of the item
func (i *Item) GetLastModifiedBy() IdentitySet {
if i.IsRemote() && i.RemoteItem.LastModifiedBy != (IdentitySet{}) {
return i.RemoteItem.LastModifiedBy
}
return i.LastModifiedBy
}
// GetCreatedDateTime returns a normalized CreatedDateTime of the item
func (i *Item) GetCreatedDateTime() Timestamp {
if i.IsRemote() && i.RemoteItem.CreatedDateTime != (Timestamp{}) {
return i.RemoteItem.CreatedDateTime
}
return i.CreatedDateTime
}
// GetLastModifiedDateTime returns a normalized LastModifiedDateTime of the item
func (i *Item) GetLastModifiedDateTime() Timestamp {
if i.IsRemote() && i.RemoteItem.LastModifiedDateTime != (Timestamp{}) {
return i.RemoteItem.LastModifiedDateTime
}
return i.LastModifiedDateTime
}
// GetParentReferance returns a normalized ParentReference of the item
func (i *Item) GetParentReferance() *ItemReference {
if i.IsRemote() && i.ParentReference == nil {
return i.RemoteItem.ParentReference
}
return i.ParentReference
}
// IsRemote checks if item is a remote item
func (i *Item) IsRemote() bool {
return i.RemoteItem != nil
}
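
GetID above emits IDs in a "driveID#itemID" form and refers to onedrive.parseNormalizedID for the reverse step; that helper is not shown in this diff, so the following is only a hypothetical sketch of such a parser, with made-up example values.

package main

import (
    "fmt"
    "strings"
)

// parseNormalizedID is a hypothetical stand-in for the helper mentioned
// in the GetID comment; the real onedrive function may differ.
func parseNormalizedID(id string) (itemID, driveID string) {
    if i := strings.IndexByte(id, '#'); i >= 0 {
        return id[i+1:], id[:i]
    }
    return id, ""
}

func main() {
    item, drive := parseNormalizedID("b!exampleDrive#01EXAMPLEITEM")
    fmt.Println(drive, item) // b!exampleDrive 01EXAMPLEITEM
}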

File diff suppressed because it is too large

@@ -1,10 +1,10 @@
// Test OneDrive filesystem interface
package onedrive_test
package onedrive
import (
"testing"
"github.com/ncw/rclone/backend/onedrive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -12,6 +12,15 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestOneDrive:",
NilObject: (*onedrive.Object)(nil),
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple),
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)


@@ -44,7 +44,6 @@ const (
Size = 20
bitsInLastCell = 32
shift = 11
threshold = 600
widthInBits = 8 * Size
dataSize = (widthInBits-1)/64 + 1
)


@@ -140,7 +140,7 @@ func TestQuickXorHashByBlock(t *testing.T) {
got := h.Sum(nil)
want, err := base64.StdEncoding.DecodeString(test.out)
require.NoError(t, err, what)
assert.Equal(t, want, got[:], test.size, what)
assert.Equal(t, want, got, test.size, what)
}
}
}

File diff suppressed because it is too large

@@ -0,0 +1,17 @@
// Test Opendrive filesystem interface
package opendrive_test
import (
"testing"
"github.com/ncw/rclone/backend/opendrive"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestOpenDrive:",
NilObject: (*opendrive.Object)(nil),
})
}


@@ -0,0 +1,78 @@
/*
Translate file names for OpenDrive
OpenDrive reserved characters
The following characters are OpenDrive reserved characters, and can't
be used in OpenDrive folder and file names.
\ / : * ? " < > |
OpenDrive files and folders also can't have leading or trailing spaces.
*/
package opendrive
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// OpenDrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
':': '：', // FULLWIDTH COLON
'*': '＊', // FULLWIDTH ASTERISK
'?': '？', // FULLWIDTH QUESTION MARK
'"': '＂', // FULLWIDTH QUOTATION MARK
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'|': '｜', // FULLWIDTH VERTICAL LINE
' ': '␠', // SYMBOL FOR SPACE
}
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
invCharMap map[rune]rune
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Filenames can't start with space
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Filenames can't end with space
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}


@@ -0,0 +1,28 @@
package opendrive
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:|#%".~`, `＼＊＜＞？：｜#%＂.~`},
{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊＜＞？：｜#%＂.~/＼＊＜＞？：｜#%＂.~`},
{" leading space", "␠leading space"},
{" path/ leading spaces", "␠path/␠ leading spaces"},
{"trailing space ", "trailing space␠"},
{"trailing spaces /path ", "trailing spaces ␠/path␠"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}

backend/opendrive/types.go Normal file

@@ -0,0 +1,214 @@
package opendrive
import (
"encoding/json"
"fmt"
)
// Error describes an openDRIVE error response
type Error struct {
Info struct {
Code int `json:"code"`
Message string `json:"message"`
} `json:"error"`
}
// Error satisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (Error %d)", e.Info.Message, e.Info.Code)
}
// Account describes an OpenDRIVE account
type Account struct {
Username string `json:"username"`
Password string `json:"passwd"`
}
// UserSessionInfo describes an OpenDRIVE session
type UserSessionInfo struct {
Username string `json:"username"`
Password string `json:"passwd"`
SessionID string `json:"SessionID"`
UserName string `json:"UserName"`
UserFirstName string `json:"UserFirstName"`
UserLastName string `json:"UserLastName"`
AccType string `json:"AccType"`
UserLang string `json:"UserLang"`
UserID string `json:"UserID"`
IsAccountUser json.RawMessage `json:"IsAccountUser"`
DriveName string `json:"DriveName"`
UserLevel string `json:"UserLevel"`
UserPlan string `json:"UserPlan"`
FVersioning string `json:"FVersioning"`
UserDomain string `json:"UserDomain"`
PartnerUsersDomain string `json:"PartnerUsersDomain"`
}
// FolderList describes an OpenDRIVE listing
type FolderList struct {
// DirUpdateTime string `json:"DirUpdateTime,string"`
Name string `json:"Name"`
ParentFolderID string `json:"ParentFolderID"`
DirectFolderLink string `json:"DirectFolderLink"`
ResponseType int `json:"ResponseType"`
Folders []Folder `json:"Folders"`
Files []File `json:"Files"`
}
// Folder describes an OpenDRIVE folder
type Folder struct {
FolderID string `json:"FolderID"`
Name string `json:"Name"`
DateCreated int `json:"DateCreated"`
DirUpdateTime int `json:"DirUpdateTime"`
Access int `json:"Access"`
DateModified int64 `json:"DateModified"`
Shared string `json:"Shared"`
ChildFolders int `json:"ChildFolders"`
Link string `json:"Link"`
Encrypted string `json:"Encrypted"`
}
type createFolder struct {
SessionID string `json:"session_id"`
FolderName string `json:"folder_name"`
FolderSubParent string `json:"folder_sub_parent"`
FolderIsPublic int64 `json:"folder_is_public"` // (0 = private, 1 = public, 2 = hidden)
FolderPublicUpl int64 `json:"folder_public_upl"` // (0 = disabled, 1 = enabled)
FolderPublicDisplay int64 `json:"folder_public_display"` // (0 = disabled, 1 = enabled)
FolderPublicDnl int64 `json:"folder_public_dnl"` // (0 = disabled, 1 = enabled).
}
type createFolderResponse struct {
FolderID string `json:"FolderID"`
Name string `json:"Name"`
DateCreated int `json:"DateCreated"`
DirUpdateTime int `json:"DirUpdateTime"`
Access int `json:"Access"`
DateModified int `json:"DateModified"`
Shared string `json:"Shared"`
Description string `json:"Description"`
Link string `json:"Link"`
}
type moveCopyFolder struct {
SessionID string `json:"session_id"`
FolderID string `json:"folder_id"`
DstFolderID string `json:"dst_folder_id"`
Move string `json:"move"`
NewFolderName string `json:"new_folder_name"` // New name for destination folder.
}
type moveCopyFolderResponse struct {
FolderID string `json:"FolderID"`
}
type removeFolder struct {
SessionID string `json:"session_id"`
FolderID string `json:"folder_id"`
}
// File describes an OpenDRIVE file
type File struct {
FileID string `json:"FileId"`
FileHash string `json:"FileHash"`
Name string `json:"Name"`
GroupID int `json:"GroupID"`
Extension string `json:"Extension"`
Size int64 `json:"Size,string"`
Views string `json:"Views"`
Version string `json:"Version"`
Downloads string `json:"Downloads"`
DateModified int64 `json:"DateModified,string"`
Access string `json:"Access"`
Link string `json:"Link"`
DownloadLink string `json:"DownloadLink"`
StreamingLink string `json:"StreamingLink"`
TempStreamingLink string `json:"TempStreamingLink"`
EditLink string `json:"EditLink"`
ThumbLink string `json:"ThumbLink"`
Password string `json:"Password"`
EditOnline int `json:"EditOnline"`
}
type moveCopyFile struct {
SessionID string `json:"session_id"`
SrcFileID string `json:"src_file_id"`
DstFolderID string `json:"dst_folder_id"`
Move string `json:"move"`
OverwriteIfExists string `json:"overwrite_if_exists"`
NewFileName string `json:"new_file_name"` // New name for destination file.
}
type moveCopyFileResponse struct {
FileID string `json:"FileID"`
Size string `json:"Size"`
}
type createFile struct {
SessionID string `json:"session_id"`
FolderID string `json:"folder_id"`
Name string `json:"file_name"`
}
type createFileResponse struct {
FileID string `json:"FileId"`
Name string `json:"Name"`
GroupID int `json:"GroupID"`
Extension string `json:"Extension"`
Size string `json:"Size"`
Views string `json:"Views"`
Downloads string `json:"Downloads"`
DateModified string `json:"DateModified"`
Access string `json:"Access"`
Link string `json:"Link"`
DownloadLink string `json:"DownloadLink"`
StreamingLink string `json:"StreamingLink"`
TempStreamingLink string `json:"TempStreamingLink"`
DirUpdateTime int `json:"DirUpdateTime"`
TempLocation string `json:"TempLocation"`
SpeedLimit int `json:"SpeedLimit"`
RequireCompression int `json:"RequireCompression"`
RequireHash int `json:"RequireHash"`
RequireHashOnly int `json:"RequireHashOnly"`
}
type modTimeFile struct {
SessionID string `json:"session_id"`
FileID string `json:"file_id"`
FileModificationTime string `json:"file_modification_time"`
}
type openUpload struct {
SessionID string `json:"session_id"`
FileID string `json:"file_id"`
Size int64 `json:"file_size"`
}
type openUploadResponse struct {
TempLocation string `json:"TempLocation"`
RequireCompression bool `json:"RequireCompression"`
RequireHash bool `json:"RequireHash"`
RequireHashOnly bool `json:"RequireHashOnly"`
SpeedLimit int `json:"SpeedLimit"`
}
type closeUpload struct {
SessionID string `json:"session_id"`
FileID string `json:"file_id"`
Size int64 `json:"file_size"`
TempLocation string `json:"temp_location"`
}
type closeUploadResponse struct {
FileID string `json:"FileID"`
FileHash string `json:"FileHash"`
Size int64 `json:"Size"`
}
type permissions struct {
SessionID string `json:"session_id"`
FileID string `json:"file_id"`
FileIsPublic int64 `json:"file_ispublic"`
}
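
Several numeric fields above carry the ,string tag (for example Size int64 `json:"Size,string"`) because the OpenDrive API returns them as quoted strings. The short standalone program below illustrates how encoding/json handles that option; the type name and payload are made up for the example.

package main

import (
    "encoding/json"
    "fmt"
)

// quoted numbers decode into int64 fields when the ,string option is set
type sizedFile struct {
    Name string `json:"Name"`
    Size int64  `json:"Size,string"`
}

func main() {
    raw := []byte(`{"Name":"example.bin","Size":"1048576"}`) // illustrative payload
    var f sizedFile
    if err := json.Unmarshal(raw, &f); err != nil {
        panic(err)
    }
    fmt.Println(f.Name, f.Size) // example.bin 1048576
}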


@@ -17,13 +17,14 @@ import (
"net/http"
"net/url"
"path"
"regexp"
"strings"
"time"
"github.com/ncw/rclone/backend/pcloud/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
@@ -66,26 +67,31 @@ func init() {
Name: "pcloud",
Description: "Pcloud",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.Config("pcloud", name, oauthConfig)
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("pcloud", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Pcloud App Client Id - leave blank normally.",
Help: "Pcloud App Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Pcloud App Client Secret - leave blank normally.",
Help: "Pcloud App Client Secret\nLeave blank normally.",
}},
})
}
// Options defines the configuration for this backend
type Options struct {
}
// Fs represents a remote pcloud
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
@@ -130,9 +136,6 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// Pattern to match a pcloud path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses a pcloud 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
@@ -233,16 +236,23 @@ func errorHandler(resp *http.Response) error {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
log.Fatalf("Failed to configure Pcloud: %v", err)
return nil, err
}
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Pcloud")
}
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
@@ -266,16 +276,16 @@ func NewFs(name, root string) (fs.Fs, error) {
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
newF := *f
newF.dirCache = dircache.New(newRoot, rootID, &newF)
newF.root = newRoot
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = newF.dirCache.FindRoot(false)
err = tempF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := newF.newObjectWithInfo(remote, nil)
_, err := tempF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -283,8 +293,13 @@ func NewFs(name, root string) (fs.Fs, error) {
}
return nil, err
}
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return &newF, fs.ErrorIsFile
return f, fs.ErrorIsFile
}
return f, nil
}
@@ -1097,6 +1112,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return shouldRetry(resp, err)
})
if err != nil {
// sometimes pcloud leaves a half complete file on
// error, so delete it if it exists
delObj, delErr := o.fs.NewObject(o.remote)
if delErr == nil && delObj != nil {
_ = delObj.Remove()
}
return err
}
if len(result.Items) != 1 {
@@ -1122,6 +1143,11 @@ func (o *Object) Remove() error {
})
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.id
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1133,4 +1159,5 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
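
The tempF change above works around a subtle pitfall noted in the comment (issue 2182): the features table is filled with methods bound to the original *Fs, so returning a shallow copy would leave those callbacks pointing at the old value. The small standalone program below illustrates that behaviour; the types and names are made up for the example.

package main

import "fmt"

type remote struct {
    root string
    // hook stands in for the features table: it is filled once with a
    // method value bound to the original pointer.
    hook func() string
}

func (r *remote) describe() string { return "root=" + r.root }

func newRemote(root string) *remote {
    r := &remote{root: root}
    r.hook = r.describe
    return r
}

func main() {
    orig := newRemote("/parent")
    copyR := *orig // shallow copy, like tempF := *f above
    copyR.root = "/parent/file"
    fmt.Println(copyR.hook()) // prints "root=/parent": the hook still sees the
    // original value, which is why the code above mutates f in place instead
    // of returning &tempF.
}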


@@ -17,7 +17,8 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
@@ -34,57 +35,91 @@ func init() {
Description: "QingCloud Object Storage",
NewFs: NewFs,
Options: []fs.Option{{
Name: "env_auth",
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
Examples: []fs.OptionExample{
{
Value: "false",
Help: "Enter QingStor credentials in the next step",
}, {
Value: "true",
Help: "Get QingStor credentials from the environment (env vars or IAM)",
},
},
Name: "env_auth",
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter QingStor credentials in the next step",
}, {
Value: "true",
Help: "Get QingStor credentials from the environment (env vars or IAM)",
}},
}, {
Name: "access_key_id",
Help: "QingStor Access Key ID - leave blank for anonymous access or runtime credentials.",
Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "secret_access_key",
Help: "QingStor Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "endpoint",
Help: "Enter a endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"",
}, {
Name: "zone",
Help: "Choose or Enter a zone to connect. Default is \"pek3a\".",
Examples: []fs.OptionExample{
{
Value: "pek3a",
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
},
{
Value: "sh1a",
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
},
{
Value: "gd2a",
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
},
},
Help: "Zone to connect to.\nDefault is \"pek3a\".",
Examples: []fs.OptionExample{{
Value: "pek3a",
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
}, {
Value: "sh1a",
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
}, {
Value: "gd2a",
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
}},
}, {
Name: "connection_retries",
Help: "Number of connnection retry.\nLeave blank will use the default value \"3\".",
Name: "connection_retries",
Help: "Number of connection retries.",
Default: 3,
Advanced: true,
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).
If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 1,
Advanced: true,
}},
})
}
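Editorial note: as the chunk_size help above states, roughly upload_concurrency chunks of chunk_size bytes are buffered in memory per transfer. As a rough worked example under that assumption, an 8M chunk size with a concurrency of 4 would buffer on the order of 32M per transfer, multiplied again by the global --transfers setting; the defaults shown here (4M chunks, concurrency 1) keep that overhead minimal.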
// Constants
const (
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
minChunkSize = fs.SizeSuffix(minMultiPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)
// Globals
@@ -95,17 +130,31 @@ func timestampToTime(tp int64) time.Time {
return tm.UTC()
}
// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Endpoint string `config:"endpoint"`
Zone string `config:"zone"`
ConnectionRetries int `config:"connection_retries"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
}
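Editorial note: the config:"..." struct tags above are what configstruct.Set uses to bind configuration keys onto the Options fields. The following is a minimal, self-contained sketch of tag-driven binding in that spirit, for illustration only; setFromMap and the options type are hypothetical and not the rclone implementation.

package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// setFromMap copies values from m into fields of the struct pointed to by dst,
// matched by the `config:"..."` tag. Only string, bool and int fields are handled here.
func setFromMap(dst interface{}, m map[string]string) error {
	v := reflect.ValueOf(dst).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		key := t.Field(i).Tag.Get("config")
		raw, ok := m[key]
		if key == "" || !ok {
			continue
		}
		switch f := v.Field(i); f.Kind() {
		case reflect.String:
			f.SetString(raw)
		case reflect.Bool:
			b, err := strconv.ParseBool(raw)
			if err != nil {
				return err
			}
			f.SetBool(b)
		case reflect.Int:
			n, err := strconv.Atoi(raw)
			if err != nil {
				return err
			}
			f.SetInt(int64(n))
		}
	}
	return nil
}

type options struct {
	Zone              string `config:"zone"`
	ConnectionRetries int    `config:"connection_retries"`
	EnvAuth           bool   `config:"env_auth"`
}

func main() {
	opt := new(options)
	_ = setFromMap(opt, map[string]string{"zone": "pek3a", "connection_retries": "3", "env_auth": "false"})
	fmt.Printf("%+v\n", *opt)
}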
// Fs represents a remote qingstor server
type Fs struct {
name string // The name of the remote
root string // The root is a subdir, is a special object
opt Options // parsed options
features *fs.Features // optional features
svc *qs.Service // The connection to the qingstor server
zone string // The zone we are working on
bucket string // The bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucketOK and bucketDeleted
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
root string // The root is a subdir, is a special object
features *fs.Features // optional features
svc *qs.Service // The connection to the qingstor server
}
// Object describes a qingstor object
@@ -126,10 +175,12 @@ type Object struct {
// ------------------------------------------------------------
// Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// qsParsePath parses a qingstor 'url'
func qsParsePath(path string) (bucket, key string, err error) {
// Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("Couldn't parse bucket out of qingstor path %q", path)
@@ -165,12 +216,12 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
}
// qsConnection makes a connection to qingstor
func qsServiceConnection(name string) (*qs.Service, error) {
accessKeyID := config.FileGet(name, "access_key_id")
secretAccessKey := config.FileGet(name, "secret_access_key")
func qsServiceConnection(opt *Options) (*qs.Service, error) {
accessKeyID := opt.AccessKeyID
secretAccessKey := opt.SecretAccessKey
switch {
case config.FileGetBool(name, "env_auth", false):
case opt.EnvAuth:
// No need for empty checks if "env_auth" is true
case accessKeyID == "" && secretAccessKey == "":
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
@@ -184,7 +235,7 @@ func qsServiceConnection(name string) (*qs.Service, error) {
host := "qingstor.com"
port := 443
endpoint := config.FileGet(name, "endpoint", "")
endpoint := opt.Endpoint
if endpoint != "" {
_protocol, _host, _port, err := qsParseEndpoint(endpoint)
@@ -204,48 +255,87 @@ func qsServiceConnection(name string) (*qs.Service, error) {
}
connectionRetries := 3
retries := config.FileGet(name, "connection_retries", "")
if retries != "" {
connectionRetries, _ = strconv.Atoi(retries)
}
cf, err := qsConfig.NewDefault()
if err != nil {
return nil, err
}
cf.AccessKeyID = accessKeyID
cf.SecretAccessKey = secretAccessKey
cf.Protocol = protocol
cf.Host = host
cf.Port = port
cf.ConnectionRetries = connectionRetries
cf.ConnectionRetries = opt.ConnectionRetries
cf.Connection = fshttp.NewClient(fs.Config)
svc, _ := qs.Init(cf)
return qs.Init(cf)
}
return svc, err
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
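Editorial note: given the constants declared in this file (minChunkSize is the 4M multipart minimum, maxUploadCutoff is the 5G single-object limit, defaultUploadCutoff is 200M), checkUploadChunkSize rejects any chunk size below 4M and checkUploadCutoff rejects any cutoff above 5G. The setters swap in the validated value and hand back the previous one, which is presumably what lets the fstests helpers exposed further down temporarily override the chunk size and cutoff and then restore them.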
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "qingstor: chunk size")
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
}
bucket, key, err := qsParsePath(root)
if err != nil {
return nil, err
}
svc, err := qsServiceConnection(name)
svc, err := qsServiceConnection(opt)
if err != nil {
return nil, err
}
zone := config.FileGet(name, "zone")
if zone == "" {
zone = "pek3a"
if opt.Zone == "" {
opt.Zone = "pek3a"
}
f := &Fs{
name: name,
zone: zone,
root: key,
bucket: bucket,
opt: *opt,
svc: svc,
zone: opt.Zone,
bucket: bucket,
}
f.features = (&fs.Features{
ReadMimeType: true,
@@ -258,7 +348,7 @@ func NewFs(name, root string) (fs.Fs, error) {
f.root += "/"
}
//Check to see if the object exists
bucketInit, err := svc.Bucket(bucket, zone)
bucketInit, err := svc.Bucket(bucket, opt.Zone)
if err != nil {
return nil, err
}
@@ -904,16 +994,24 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
mimeType := fs.MimeType(src)
req := uploadInput{
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
partSize: int64(o.fs.opt.ChunkSize),
concurrency: o.fs.opt.UploadConcurrency,
}
uploader := newUploader(&req)
err = uploader.upload()
size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if multipart {
err = uploader.upload()
} else {
err = uploader.singlePartUpload(in, size)
}
if err != nil {
return err
}
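Editorial note: a small sketch of the size-based routing introduced above, assuming the 200M default upload_cutoff declared earlier in this diff; useMultipart is a hypothetical helper, not rclone code.

// useMultipart mirrors the decision above: streams of unknown size (negative) cannot be
// sent as one PUT with an explicit ContentLength, so they take the multipart path too.
func useMultipart(size, cutoff int64) bool {
	return size < 0 || size >= cutoff
}

// useMultipart(150<<20, 200<<20) == false -> single part upload
// useMultipart(300<<20, 200<<20) == true  -> multipart upload
// useMultipart(-1, 200<<20)      == true  -> multipart upload (size unknown)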

View File

@@ -2,12 +2,12 @@
// +build !plan9
package qingstor_test
package qingstor
import (
"testing"
"github.com/ncw/rclone/backend/qingstor"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -15,6 +15,19 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestQingStor:",
NilObject: (*qingstor.Object)(nil),
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

View File

@@ -19,10 +19,10 @@ import (
)
const (
maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor
maxMultiPartSize = 1024 * 1024 * 1024 * 1 // The maximum allowed part size when uploading a part to QingStor
minMultiPartSize = 1024 * 1024 * 4 // The minimum allowed part size when uploading a part to QingStor
maxMultiParts = 10000 // The maximum allowed number of parts in an multi-part upload
// maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor
// maxMultiPartSize = 1024 * 1024 * 1024 * 1 // The maximum allowed part size when uploading a part to QingStor
minMultiPartSize = 1024 * 1024 * 4 // The minimum allowed part size when uploading a part to QingStor
maxMultiParts = 10000 // The maximum allowed number of parts in a multi-part upload
)
const (
@@ -152,11 +152,11 @@ func (u *uploader) init() {
}
// singlePartUpload uploads a single object whose contentLength is less than "defaultUploadPartSize"
func (u *uploader) singlePartUpload(buf io.ReadSeeker) error {
func (u *uploader) singlePartUpload(buf io.Reader, size int64) error {
bucketInit, _ := u.bucketInit()
req := qs.PutObjectInput{
ContentLength: &u.readerPos,
ContentLength: &size,
ContentType: &u.cfg.mimeType,
Body: buf,
}
@@ -179,13 +179,13 @@ func (u *uploader) upload() error {
// Do one read to determine if we have more than one part
reader, _, err := u.nextReader()
if err == io.EOF { // single part
fs.Debugf(u, "Tried to upload a singile object to QingStor")
return u.singlePartUpload(reader)
fs.Debugf(u, "Uploading as single part object to QingStor")
return u.singlePartUpload(reader, u.readerPos)
} else if err != nil {
return errors.Errorf("read upload data failed: %s", err)
}
fs.Debugf(u, "Treied to upload a multi-part object to QingStor")
fs.Debugf(u, "Uploading as multi-part object to QingStor")
mu := multiUploader{uploader: u}
return mu.multiPartUpload(reader)
}
@@ -261,7 +261,7 @@ func (mu *multiUploader) initiate() error {
req := qs.InitiateMultipartUploadInput{
ContentType: &mu.cfg.mimeType,
}
fs.Debugf(mu, "Tried to initiate a multi-part upload")
fs.Debugf(mu, "Initiating a multi-part upload")
rsp, err := bucketInit.InitiateMultipartUpload(mu.cfg.key, &req)
if err == nil {
mu.uploadID = rsp.UploadID
@@ -279,12 +279,12 @@ func (mu *multiUploader) send(c chunk) error {
ContentLength: &c.size,
Body: c.buffer,
}
fs.Debugf(mu, "Tried to upload a part to QingStor that partNumber %d and partSize %d", c.partNumber, c.size)
fs.Debugf(mu, "Uploading a part to QingStor with partNumber %d and partSize %d", c.partNumber, c.size)
_, err := bucketInit.UploadMultipart(mu.cfg.key, &req)
if err != nil {
return err
}
fs.Debugf(mu, "Upload part finished that partNumber %d and partSize %d", c.partNumber, c.size)
fs.Debugf(mu, "Done uploading part partNumber %d and partSize %d", c.partNumber, c.size)
mu.mtx.Lock()
defer mu.mtx.Unlock()
@@ -304,7 +304,7 @@ func (mu *multiUploader) list() error {
req := qs.ListMultipartInput{
UploadID: mu.uploadID,
}
fs.Debugf(mu, "Tried to list a multi-part")
fs.Debugf(mu, "Reading multi-part details")
rsp, err := bucketInit.ListMultipart(mu.cfg.key, &req)
if err == nil {
mu.objectParts = rsp.ObjectParts
@@ -331,7 +331,7 @@ func (mu *multiUploader) complete() error {
ObjectParts: mu.objectParts,
ETag: &md5String,
}
fs.Debugf(mu, "Tried to complete a multi-part")
fs.Debugf(mu, "Completing multi-part object")
_, err = bucketInit.CompleteMultipartUpload(mu.cfg.key, &req)
if err == nil {
fs.Debugf(mu, "Complete multi-part finished")
@@ -348,7 +348,7 @@ func (mu *multiUploader) abort() error {
req := qs.AbortMultipartUploadInput{
UploadID: uploadID,
}
fs.Debugf(mu, "Tried to abort a multi-part")
fs.Debugf(mu, "Aborting multi-part object %q", *uploadID)
_, err = bucketInit.AbortMultipartUpload(mu.cfg.key, &req)
}
@@ -392,6 +392,14 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
var nextChunkLen int
reader, nextChunkLen, err = mu.nextReader()
if err != nil && err != io.EOF {
// empty ch
go func() {
for range ch {
}
}()
// Wait for all goroutines to finish
close(ch)
mu.wg.Wait()
return err
}
if nextChunkLen == 0 && partNumber > 0 {
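Editorial note: the error path added above drains anything still queued on ch in a separate goroutine, closes the channel so the worker goroutines' range loops terminate, and only then waits on the WaitGroup before returning the error. A self-contained sketch of that shutdown sequence with hypothetical names (not the rclone code):

package main

import (
	"fmt"
	"sync"
)

func main() {
	work := make(chan int, 4) // the producer queues chunks here; workers range over it
	var wg sync.WaitGroup

	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for range work {
				// process a chunk (omitted)
			}
		}()
	}

	work <- 1
	work <- 2

	// Producer hits an error mid-stream: discard any chunks still queued (they will not
	// be uploaded), close the channel so the workers stop, then wait for them to exit.
	go func() {
		for range work {
		}
	}()
	close(work)
	wg.Wait()
	fmt.Println("workers stopped cleanly; the original error can now be returned")
}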

File diff suppressed because it is too large.

View File

@@ -1,10 +1,10 @@
// Test S3 filesystem interface
package s3_test
package s3
import (
"testing"
"github.com/ncw/rclone/backend/s3"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -12,6 +12,19 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestS3:",
NilObject: (*s3.Object)(nil),
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
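Editorial note: the blank-identifier var line above is the usual Go compile-time assertion that *Fs satisfies fstests.SetUploadChunkSizer; it costs nothing at runtime and breaks the build if the method set ever drifts. A generic illustration with placeholder types (not rclone code):

package main

import "fmt"

type myType struct{}

func (myType) String() string { return "myType" }

// Fails to compile if myType ever stops satisfying fmt.Stringer - no runtime cost.
var _ fmt.Stringer = myType{}

func main() { fmt.Println(myType{}) }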

Some files were not shown because too many files have changed in this diff.