mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits


257 Commits

Author SHA1 Message Date
Nick Craig-Wood
c4700f4bf1 vendor patch: allow 2048 bit DSA keys DO NOT MERGE 2020-05-15 09:46:42 +01:00
Nick Craig-Wood
e4f1e19127 sftp: fix post transfer copies failing with 0 size when using set_modtime=false
Before this change we exited the SetModTime call early, which meant we
skipped reading the info about the file.

This change reads info about the file in the SetModTime call even if
we are skipping setting the modtime.

See: https://forum.rclone.org/t/sftp-and-set-modtime-false-error/16362
2020-05-14 17:30:01 +01:00
Nick Craig-Wood
4a1b644bfb azureblob: implement streaming of unknown sized files
See: https://forum.rclone.org/t/rclone-rcat-azure-blob-container-sas-token-403-error/16286/3
2020-05-14 11:56:15 +01:00
Nick Craig-Wood
8c9c86c3d6 putio: fix parsing of remotes with leading and trailing /
See: https://forum.rclone.org/t/unable-to-copy-from-remote-but-mount-works/16351/
2020-05-14 11:52:43 +01:00
Nick Craig-Wood
8a58e0235d s3: don't leak memory or tokens in edge cases for multipart upload 2020-05-14 07:48:18 +01:00
Nick Craig-Wood
52b7337d28 crypt: change backend encode/decode to output a plain list
This commit changes the output of the rclone backend encode crypt: and
decode commands to output a plain list of decoded or encoded file
names.

This makes the command much more useful for command line scripting.
2020-05-13 18:11:45 +01:00
Nick Craig-Wood
995cd0dc32 backend: add --json flag to always output JSON 2020-05-13 18:07:41 +01:00
Nick Craig-Wood
5eb558e058 backend: fix output of an array of strings 2020-05-13 17:55:56 +01:00
Max Sum
33d9310c49 union: enable ListR when upstreams contain local
Enable fast list functions for the union backend when:

- at least one of the upstreams supports fast list
- the upstreams consist only of backends that support fast list and the local backend (see the sketch below)

Fixes #3000
2020-05-13 13:10:35 +01:00
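For illustration, here is a minimal Go sketch of the rule described in the commit above; `upstream` and `canEnableListR` are hypothetical names and this is not the union backend's actual code:

```go
package main

import "fmt"

// upstream is a hypothetical stand-in for a union member backend.
type upstream struct {
	isLocal  bool
	hasListR bool
}

// canEnableListR applies the rule above: at least one upstream supports
// fast list, and every upstream either supports fast list or is local.
func canEnableListR(ups []upstream) bool {
	anyListR := false
	for _, u := range ups {
		switch {
		case u.hasListR:
			anyListR = true
		case !u.isLocal:
			// A non-local upstream without fast list disqualifies the union.
			return false
		}
	}
	return anyListR
}

func main() {
	fmt.Println(canEnableListR([]upstream{{isLocal: true}, {hasListR: true}}))  // true
	fmt.Println(canEnableListR([]upstream{{isLocal: false}, {hasListR: true}})) // false
}
```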
Nick Craig-Wood
aba89e2737 Add Caleb Case @calebcase as the tardigrade backend maintainer 2020-05-13 12:57:29 +01:00
Nick Craig-Wood
d685e7b4b5 bin: add check-merged.go to find local branches which might have been merged 2020-05-13 12:42:45 +01:00
Nick Craig-Wood
9e4b68a364 drive: server side copy docs use default description if empty
When server side copying Google docs files we attempt to preserve the
description.

This patch makes it so that we use the default description if the
original description was empty.

See: 6fdd7149c1 (commitcomment-38008638)
2020-05-13 12:31:37 +01:00
Nick Craig-Wood
044a3b3920 fserrors: Make "tls: use of closed connection" a retriable error
This has happened when uploading very large files to B2. It is
probably a bug in the go runtime but we'll attempt to work around it
here.

See: https://forum.rclone.org/t/large-file-upload-to-backblaze-failed/16128/5
2020-05-13 11:42:37 +01:00
Nick Craig-Wood
d342f9f942 azureblob: fix permission error on SAS URL limited to container
Before this change, for some operations, eg rcat or copyto (of a file)
rclone would attempt to create the container when using a SAS URL
limited to a container.

After this change we assume the container does not need creating when
using a container SAS URL.

See: https://forum.rclone.org/t/rclone-rcat-azure-blob-container-sas-token-403-error/16286
2020-05-13 09:11:51 +01:00
Nick Craig-Wood
e91b509578 fs: allow --min-age/--max-age to take a date as well as a duration
Fixes #4211
2020-05-12 17:49:33 +01:00
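As a rough illustration of the idea (not rclone's actual flag parser, which also accepts suffixes such as `1d` and `1w`), a value can be tried as a duration first and then as an absolute date; `parseAgeOrDate` below is a hypothetical helper:

```go
package main

import (
	"fmt"
	"time"
)

// parseAgeOrDate shows the fallback order: try the value as a duration
// relative to now, then as an absolute date in a few common layouts.
func parseAgeOrDate(s string) (time.Time, error) {
	if d, err := time.ParseDuration(s); err == nil {
		return time.Now().Add(-d), nil
	}
	for _, layout := range []string{"2006-01-02", "2006-01-02T15:04:05"} {
		if t, err := time.Parse(layout, s); err == nil {
			return t, nil
		}
	}
	return time.Time{}, fmt.Errorf("couldn't parse %q as a duration or a date", s)
}

func main() {
	for _, arg := range []string{"24h", "2020-05-01"} {
		t, err := parseAgeOrDate(arg)
		fmt.Println(arg, "->", t, err)
	}
}
```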
Nick Craig-Wood
8ddb3fbb2e drive: fix using list recursive on shortcuts to directories 2020-05-12 17:08:05 +01:00
Nick Craig-Wood
b91e01fd22 drive: strip trailing slashes in shortcut command #4098
This also fixes a typo in the name of the function, and allows making
shortcuts from the root directory, which is useful in cross drive
shortcut creation.

This also adds a basic suite of tests for creating, listing and removing
shortcuts.
2020-05-12 17:08:05 +01:00
Nick Craig-Wood
177195aeeb accounting: fix race clearing stats
This race was introduced by

10a6a92e52 accounting: reset bytes read during copy retry
2020-05-12 17:02:32 +01:00
Nick Craig-Wood
cb5979a468 accounting: factor stats into its own structure
This makes it very obvious which mutex to take for accessing the
values.
2020-05-12 17:02:32 +01:00
Nick Craig-Wood
32507774de Add Caleb Case to contributors 2020-05-12 17:01:26 +01:00
Caleb Case
0ce662faad Tardigrade Backend 2020-05-12 15:56:50 +00:00
Caleb Case
03b629064a Tardigrade Backend: Dependencies 2020-05-12 15:56:50 +00:00
albertony
962fbc8257 jottacloud: update docs regarding cleanup, removed remains from old auth, and added warning about special mountpoints. 2020-05-11 11:41:17 +02:00
Nick Craig-Wood
bd4b91bd57 test_all: revert running fichier tests one at a time
This didn't achieve the objective of getting the tests to run cleanly
and made them take 16 hours!

This reverts commit 97f6f8fe19.
2020-05-11 08:51:33 +01:00
Ankur Gupta
10a6a92e52 accounting: reset bytes read during copy retry - fixes #4178
During a copy/sync command, if an operation fails due to a network
issue and is retried, the underlying io.Reader is re-initialised,
but the stats for bytes already read are not reset, leading to incorrect
stats. This was fixed by resetting the bytes read when an Account is
re-initialized.
2020-05-10 17:58:22 +00:00
Max Sum
54b16bd054 union: implement ListR 2020-05-10 17:57:03 +00:00
Max Sum
f21e97001b union: fix server-side copy 2020-05-10 17:56:18 +00:00
Nick Craig-Wood
1f005a82ad http: add missing comment to pacify linter 2020-05-10 18:53:38 +01:00
Nick Craig-Wood
bb65974e2f drive: implement backend shortcut command for creating shortcuts #4098 2020-05-09 15:16:15 +01:00
Nick Craig-Wood
bc0f487369 drive: look for dirs as well as files on NewObject
This means that we can return ErrorNotAFile when there is an object
with the same name as a directory rather than potentially creating a
duplicate name.
2020-05-09 15:16:15 +01:00
Nick Craig-Wood
e103c4c26a Add Maxime Suret to contributors 2020-05-09 15:16:15 +01:00
Maxime Suret
79d29bb41e serve sftp: add support for multiple host keys by repeating --key flag 2020-05-09 14:43:17 +01:00
calisro
c80b6d96dd http: improved directory listing with new template from Caddy project
This includes a new directory listing template which was originally
from the Caddy project (used with permission and copyright attribution).

This is used whenever we serve directory listings, i.e. in `rclone serve
http`, `rclone serve webdav` and `rclone rcd --rc-serve`.

This also modifies the tests so they work with the original template which
is easier to debug.
2020-05-08 16:15:21 +01:00
Nick Craig-Wood
97f6f8fe19 test_all: run fichier tests one at a time
This is in attempt to get the tests to run cleanly without hitting the
rate limiter.
2020-05-07 20:39:54 +01:00
Nick Craig-Wood
94920d39ae test_all: increase the test timeout to 60m from 30m
Some tests are failing at 30m - 60m doesn't seem unreasonable
2020-05-07 20:38:40 +01:00
Nick Craig-Wood
9403bd2990 test_all: allow -list-retries to be overridden on the command line 2020-05-07 20:36:45 +01:00
Nick Craig-Wood
e098924e61 fstest: change -list-retries default to 3 from 6
The integration tests have got much better about retrying the tests,
and we aren't testing ACD any more so we don't need it this high.
2020-05-07 20:28:41 +01:00
Nick Craig-Wood
437f9e2cef Add Sébastien Gross to contributors 2020-05-07 11:25:09 +01:00
Sébastien Gross
395f259978 cmd: when running --password-command allow use of stdin
Bind rclone's standard input to the password command's standard input.
This allows the password to be provided from a pipe and collected using cat.

The typical use case is when rclone is on a remote server with an
encrypted configuration. This solves the environment variable
issue (#3368) and avoids storing the password on the remote host.

Now the following chain is allowed:

    echo 'secret' | ssh host.example.com \
       sudo -u rclone \
       rclone --config /path/to/rclone.conf \
       --password-command 'cat' ls remote:

Signed-off-by: Sébastien Gross <seb•ɑƬ•chezwam•ɖɵʈ•org>

Co-authored-by: Sébastien Gross <seb•ɑƬ•chezwam•ɖɵʈ•org>
2020-05-07 11:02:52 +01:00
Nick Craig-Wood
b78adc9f03 Add Fred @creativeproject as the seafile backend maintainer 2020-05-07 10:18:10 +01:00
Nick Craig-Wood
b7c21310b4 Add Fred to contributors 2020-05-06 18:33:44 +01:00
Fred
c754e89906 seafile: New backend for seafile server 2020-05-06 17:33:22 +00:00
Fred
62cfe3f384 vendor: add github.com/coreos/go-semver 2020-05-06 17:33:22 +00:00
Nick Craig-Wood
afde340c9e gcs: fix --header-upload - #59
Before this change we were setting the headers on the PUT request. However this isn't where GCS needs them.

After this fix we set the headers in the object upload request itself.

This means that we only support a limited range of headers

- Cache-Control
- Content-Disposition
- Content-Encoding
- Content-Language
- Content-Type
- X-Goog-Meta-

Note that the last of those is for setting custom metadata in the form
"X-Goog-Meta-Key: value".
2020-05-06 17:34:23 +01:00
Nick Craig-Wood
d0ad83de4b Add ElonH to contributors 2020-05-06 17:34:23 +01:00
ElonH
d119bfd934 rcd: disable duplicate log
If running `rclone rcd --rc-user=admin --rc-pass=admin
--rc-allow-origin="*"`, lots of duplicate warnings appear in the log:

Warning: Allow origin set to *. This can cause serious security problems.
Warning: Allow origin set to *. This can cause serious security problems.
....

This is not conducive to analyzing debugging info.

Therefore, let's show it only once.
2020-05-05 13:47:25 +00:00
Nick Craig-Wood
bdc91eda0f serve http: add Last-Modified headers to files and directories
This means that using `rclone serve http` preserves modification times
when used with the http backend.

Fixes #4201
2020-05-05 09:41:08 +01:00
Nick Craig-Wood
ef9e6794c2 docs: make serve http/webdav template docs into a table 2020-05-04 17:36:31 +00:00
calistri
4362ca7bb9 serve http, serve webdav: Added a --template flag for user defined markup 2020-05-04 17:36:31 +00:00
Nick Craig-Wood
dcf945ed58 docs: add bin/.ignored-emails for removing email addresses from authors.md
Remove an email as requested for Anagh Kumar Baranwal
2020-05-04 17:38:25 +01:00
Anagh Kumar Baranwal
a86196a156 drive: Added command to change service_account_file and chunk_size
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-05-04 16:23:33 +00:00
Anagh Kumar Baranwal
856c2b565f crypt: Added decode/encode commands to replicate functionality of cryptdecode
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-05-04 16:23:33 +00:00
Nick Craig-Wood
1a8c5708c5 vfs: ignore file not found errors from Hash in Read.Release
There is nothing we can do about this at this point and this error can
happen when moving files so we ignore it to clean the logs up.
2020-05-04 12:18:28 +01:00
Nick Craig-Wood
14cab0fff0 local: fix "file not found" errors on post transfer Hash calculation
Before this change the local backend was returning file not found
errors for post transfer hashes for files which were moved. This was
caused by the routine which checks for the object being changed.

After this change we ignore file not found errors while checking to
see if the object has changed. If the hash has to be computed then a
file not found error will be thrown when it is opened, otherwise the
cached hash will be returned.
2020-05-04 12:17:46 +01:00
Nick Craig-Wood
69888bf966 cmount: send a hint as to whether the filesystem is case insensitive or not 2020-05-04 11:38:07 +01:00
Nick Craig-Wood
d260238f99 cmount: use ReaddirPlus on Windows to improve directory listing performance
Before this change Windows would read a directory and then immediately stat
every item in the directory.

After this change we return the stat information along with the
directory listing, which avoids all those extra stat callbacks.
2020-05-04 11:38:07 +01:00
Nick Craig-Wood
6ca7198f57 mount: fix disappearing cwd problem - fixes #4104
Before this change, the current working directory could disappear
according to the Linux kernel.

This was caused by mount returning different nodes with the same
information in them.

This change uses vfs.Node.SetSys to cache the information so we always
return the same node.
2020-05-04 11:38:07 +01:00
Nick Craig-Wood
cfcdc85b26 vfs: Add SetSys() methods to Node to allow caching stuff on a node 2020-05-04 11:38:07 +01:00
Nick Craig-Wood
7f8d74e903 docs: refresh rclone authorize help 2020-05-04 10:40:26 +01:00
Nick Craig-Wood
f2b1fedc4f drive: follow shortcuts by default, skip with --drive-skip-shortcuts
Before this change rclone would skip all shortcuts with a message

    Ignoring unknown document type "application/vnd.google-apps.shortcut"

After this change rclone resolves the shortcuts by default to the
actual files that they point to. See the docs for more info.

The --drive-skip-shortcuts flag can be used to skip shortcuts.
2020-05-02 18:28:38 +01:00
Nick Craig-Wood
b03cad3cf6 vendor: update google.golang.org/api/drive to pull in shortcuts definition 2020-05-02 18:28:38 +01:00
Nick Craig-Wood
70db13e6e8 vfs: pin the Fs in use in the Fs cache
This means we can reliably look up the Fs from the cache when using
`backend/command`.
2020-05-01 17:11:45 +01:00
Nick Craig-Wood
4c98360356 fs/cache: Add Pin and Unpin and canonicalised lookup
Before this change we stored cached Fs under the config string the
user gave us. Since the result of fs.ConfigString() can often be
different after the backend has canonicalised the paths, this meant
that we could not look up backends in the cache reliably.

After this change we store cached Fs under their config string as
returned from fs.ConfigString(f) after the Fs has been created. We
also store a map of user to canonical names (where they are different)
so the users can look up Fs under the names they passed to rclone too.

This change along with Pin and Unpin is necessary so we can look up
the Fs in use reliably in the `backend/command` remote control
interface.
2020-05-01 17:11:45 +01:00
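A minimal sketch of the scheme described above, using hypothetical names (`cache`, `Put`, `Get`) rather than the real fs/cache code: entries are stored under the canonical config string, user-supplied names map to the canonical one, and pin counts protect entries from eviction while in use.

```go
package main

import (
	"fmt"
	"sync"
)

// cache is a hypothetical sketch of the idea, not the real package.
type cache struct {
	mu    sync.Mutex
	items map[string]interface{} // canonical name -> cached value
	alias map[string]string      // user name -> canonical name
	pins  map[string]int         // canonical name -> pin count
}

func newCache() *cache {
	return &cache{
		items: map[string]interface{}{},
		alias: map[string]string{},
		pins:  map[string]int{},
	}
}

// Put stores value under its canonical name and records the name the
// user originally passed, if it differs.
func (c *cache) Put(userName, canonical string, value interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[canonical] = value
	if userName != canonical {
		c.alias[userName] = canonical
	}
}

// Get accepts either the canonical name or a recorded user alias.
func (c *cache) Get(name string) (interface{}, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if canonical, ok := c.alias[name]; ok {
		name = canonical
	}
	v, ok := c.items[name]
	return v, ok
}

// Pin and Unpin adjust a reference count consulted before eviction.
func (c *cache) Pin(canonical string)   { c.mu.Lock(); c.pins[canonical]++; c.mu.Unlock() }
func (c *cache) Unpin(canonical string) { c.mu.Lock(); c.pins[canonical]--; c.mu.Unlock() }

func main() {
	c := newCache()
	c.Put("drive:path/", "drive:path", "cached Fs")
	v, ok := c.Get("drive:path/") // lookup by the name the user typed
	fmt.Println(v, ok)
}
```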
Nick Craig-Wood
42f9f7fb5d lib/cache: add Rename, Pin and Unpin 2020-05-01 17:11:45 +01:00
Nick Craig-Wood
ca1856724c fs: add ConfigString function to return a canonical config string 2020-05-01 17:11:45 +01:00
Nick Craig-Wood
b52a39a84e drive: fix merge breakage
In 2f5a2d3c48 an incorrect merge caused compilation to fail
2020-05-01 13:02:32 +01:00
Nick Craig-Wood
a97261c54f Add gitch1 to contributors 2020-05-01 13:02:32 +01:00
gitch1
0f5579c0ba docs : add --password-command Powershell Wiki link
Add a Windows PowerShell example wiki link to the --password-command doc.
2020-04-30 17:31:58 +01:00
Nick Craig-Wood
2f5a2d3c48 drive: Don't return nil Object with nil error from newObject* functions.
Before this change the newObject* functions could return object=nil
with err=nil. The results of these functions are passed outside of the
backend code (eg in Copy, Move) and returning a nil object with a nil
error leads to crashes elsewhere as it breaks expectations.

After this change we return (nil, fs.ErrorObjectNotFound) in these
cases. In the one place this is actually needed internally (when turning
items into listings) we detect that error and use it to mean skip the
directory item.

This problem was noticed while testing the shortcuts code. It
shouldn't happen normally but it is conceivable it could.
2020-04-30 17:11:36 +01:00
Nick Craig-Wood
ba7f7c8319 build: add -trimpath to release build for reproducible builds 2020-04-30 12:43:40 +01:00
Nick Craig-Wood
77fb3c2511 vfs: bring DO NOT EDIT comments in line with "go help generate" 2020-04-30 12:24:44 +01:00
Nick Craig-Wood
74d9dabdff b2: force the case of the SHA1 to lowercase - fixes #4162
Apparently some tools (eg duplicati) upload the SHA1 in uppercase to
b2 to be stored in the `large_file_sha1` metadata. This patch forces
it to lower case.
2020-04-29 17:08:21 +01:00
Nick Craig-Wood
54edf38d0e Add Matan Rosenberg to contributors 2020-04-29 17:08:21 +01:00
Matan Rosenberg
22f06590f7 genautocomplete: add support for fish shell 2020-04-29 16:19:35 +01:00
Matan Rosenberg
3b4c24af4e vendor: update github.com/spf13/cobra to v1.0.0 2020-04-29 16:19:35 +01:00
Nick Craig-Wood
f37af9afec lsjson: Add --hash-type parameter and use it in lsf to speed up hashing
Before this change if you specified --hash MD5 in rclone lsf it would
calculate all the hashes and just return the MD5 hash which was very
slow on the local backend.

Likewise specifying --hash on rclone lsjson was equally slow.

This change introduces the --hash-type flag (and corresponding
internal parameter) so that the hashes required can be selected in
lsjson.

This is used internally in lsf when the --hash parameter is selected
to speed up the hashing by only hashing with the one hash specified.

Fixes #4181
2020-04-29 16:09:45 +01:00
Nick Craig-Wood
a3f0992a22 Add Kush to contributors 2020-04-29 16:09:45 +01:00
Kush
f555873f18 delete: added --rmdirs flag to delete directories as well - fixes #4055
If you supply the --rmdirs flag with the delete command,
it will also remove all empty directories,
leaving the root intact.
2020-04-29 12:15:30 +01:00
Nick Craig-Wood
1c8eab81a5 dbhashsum: hide the command now it is deprecated 2020-04-29 10:12:12 +01:00
Nick Craig-Wood
cbc5af329f cachestats: deprecate in favour of rclone backend stats cache: 2020-04-29 10:10:57 +01:00
Nick Craig-Wood
90d738b561 cache: implement rclone backend stats command 2020-04-29 10:10:57 +01:00
Nick Craig-Wood
d80fdad6da rc: implement backend/command for running backend commands remotely 2020-04-29 10:10:57 +01:00
Nick Craig-Wood
e2916f3a55 local: implement backend command "noop" for testing purposes 2020-04-29 10:10:57 +01:00
Nick Craig-Wood
1aa1a2c174 backend: add new backend command for backend specific commands
These commands are for implementing backend specific
functionality. They have documentation which is placed automatically
into the backend doc.

There is a simple test for the feature in the backend tests.
2020-04-29 10:10:57 +01:00
Nick Craig-Wood
195d152785 rc: add GetStructMissingOK 2020-04-29 09:42:31 +01:00
Nick Craig-Wood
1f61027f51 rc: add -o/--opt and -a/--arg for more structured input 2020-04-29 09:42:31 +01:00
Nick Craig-Wood
37a53570d4 azureblob: implement memory pooling to control memory use
This commit implements memory pooling to control excessive memory use
as was implemented in the s3 backend.
2020-04-28 17:47:10 +01:00
Nick Craig-Wood
ee7219aa20 azureblob: add --azureblob-disable-checksum flag 2020-04-28 17:47:10 +01:00
Nick Craig-Wood
b1d8da484b azureblob: retry InvalidBlobOrBlock error as it may indicate block concurrency problems
According to Microsoft support this error can be caused by

> A timing/concurrency issue where the PUT operations are happening
> about the same time for a single blob. The Put Block List operation
> writes a blob by specifying the list of block IDs that make up the
> blob. In order to be written as part of a blob, a block must have
> been successfully written to the server in a prior Put Block
> operation.
>
> Documentation reference:
>
> https://docs.microsoft.com/en-us/rest/api/storageservices/put-block
>
> This error can happen when doing concurrent upload commits after you
> have started the upload but before you commit. In that case, the
> upload fails. The application can retry this error or attempt some
> other recovery action based on the required scenario.

See: https://forum.rclone.org/t/error-while-syncing-with-azure-blob-storage-x-ms-error-code-invalidbloborblock/15561
2020-04-28 17:47:10 +01:00
Nick Craig-Wood
4e869e03f7 s3: improve docs for --s3-disable-checksum 2020-04-28 17:47:10 +01:00
Nick Craig-Wood
52c9647b06 b2: improve docs for --b2-disable-checksum 2020-04-28 17:47:10 +01:00
Nick Craig-Wood
7238ae18f9 Add Adam Stroud to contributors 2020-04-28 17:47:10 +01:00
Nick Craig-Wood
551a829eba googlephotos: don't put an image in error message - fixes #4144
For a certain class of broken or missing image Google Photos puts an
image in the error message.

Before this fix we blindly chucked it into the error message.

After this fix we replace it with some sensible text.
2020-04-28 16:51:47 +01:00
Adam Stroud
8e91f83174 googlecloudstorage: Add ARCHIVE storage class to help 2020-04-27 11:40:21 +01:00
buengese
7f776c64f0 fichier: implement custom pacer to deal with the new rate limiting 2020-04-26 20:38:56 +02:00
Nick Craig-Wood
8bf6ab2c52 accounting: fix race condition in tests 2020-04-24 12:32:09 +01:00
Nick Craig-Wood
75fc3fe389 fs: fix FixRangeOption so it doesn't add HTTPOptions in place of bad Ranges
Before this fix, FixRangeOption would substitute RangeOptions it
wanted to get rid of with an empty HTTPOption. This caused a problem
now that backends interpret HTTPOptions.

This fix substitutes those with NullOptions, which aren't interpreted as
HTTPOptions. This patch also improves the unit tests.
2020-04-24 12:32:09 +01:00
Xiaoxing Ye
c4572ebc91 rc: fix misplaced http server config - fixes #4130 2020-04-23 20:22:47 +01:00
David
e56976839a jwtutil: Fix error handling 2020-04-23 17:52:14 +01:00
David
0c0ed2fe04 box: Remove unnecessary iat from jws claims 2020-04-23 17:52:14 +01:00
Nick Craig-Wood
ab6ed256e5 putio: add support for --header-upload and --header-download #59 2020-04-23 15:55:52 +01:00
Nick Craig-Wood
7c98ecd3ab putio: make downloading files use the rclone http Client
This fixes `--header-download` and these transactions being missed from
`--dump bodies` or `--tpslimit`
2020-04-23 15:48:30 +01:00
Nick Craig-Wood
f6346a4d29 fs: add --header flag to add options to every HTTP transaction #59 2020-04-23 15:24:21 +01:00
Nick Craig-Wood
b502a74cff gcs: add support for --header-upload and --header-download #59 2020-04-23 11:41:57 +01:00
Nick Craig-Wood
8e9c25063a swift: add support for --header-upload and --header-download #59 2020-04-23 11:34:36 +01:00
Nick Craig-Wood
1dced3b3c4 rcat: add support for --header-upload #59 2020-04-23 11:34:31 +01:00
Nick Craig-Wood
087bf1d584 cat: add support for --header-download #59 2020-04-23 11:34:24 +01:00
Nick Craig-Wood
e051a34fc1 dbhashsum: deprecate: use rclone hashsum DropboxHash instead 2020-04-23 11:13:13 +01:00
Nick Craig-Wood
f5455d865b accounting: check for max transfer in WriteTo
Before this change the max transfer tests were failing for remotes
which were using WriterTo.
2020-04-23 11:13:13 +01:00
Tim Gallant
b705ead3fd docs: adds --header-download and --header-upload 2020-04-23 11:07:21 +01:00
Tim Gallant
c390fc8100 onedrive: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
14f6ce1e77 premiumizeme: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
385542e2f9 sharefile: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
fc946d0c44 fichier: pass options to rest.Opts for uploadFile 2020-04-23 11:07:21 +01:00
Tim Gallant
854c84d0ca pcloud: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
90bd0eb44c webdav: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
3130f870bb sugarsync: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
51b617f601 yandex: pass options to rest.Opts for upload 2020-04-23 11:07:21 +01:00
Tim Gallant
011ca244b2 jottacloud: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
9ea1361044 googlephotos: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
776966e22c opendrive: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
01cb256b84 box: pass options to rest.Opts for uploadPart 2020-04-23 11:07:21 +01:00
Tim Gallant
0b0163dde2 box: pass options to rest.Opts for upload 2020-04-23 11:07:21 +01:00
Tim Gallant
38123c70eb b2: pass options to rest.Opts for Update 2020-04-23 11:07:21 +01:00
Tim Gallant
5cb7229a16 s3: add support for HTTPOption 2020-04-23 11:07:21 +01:00
Tim Gallant
9bf3d3da4c fs: add UploadHeaders, DownloadHeaders to Update/Put/Open options 2020-04-23 11:07:21 +01:00
Tim Gallant
93caa459e3 fs/config: add header-download and header-upload flags 2020-04-23 11:07:21 +01:00
Nick Craig-Wood
f8039deb7c s3: fix detection of BucketAlreadyOwnedByYou and BucketAlreadyExists error
This was being silently ignored until this commit

e2bf91452a s3: report errors on bucket creation (mkdir) correctly
2020-04-22 18:14:03 +01:00
Nick Craig-Wood
86eaf43b00 vfs: fix tests for Statfs when running on backends with unknowns
This was broken in da41db4712
2020-04-22 18:14:03 +01:00
Nick Craig-Wood
8176202e6d Add Sunil Patra to contributors 2020-04-22 18:14:03 +01:00
Nick Craig-Wood
1b74879b8b Add David Bramwell to contributors 2020-04-22 18:14:03 +01:00
Sunil Patra
39319b4858 @Sunil-P
box: Added support for interchangeable root folder for Box backend - #3422
2020-04-22 17:00:13 +01:00
Sunil Patra
4af5c9aed7 pCloud: Added support for interchangeable root folder for pCloud backend - #3957 2020-04-22 16:58:01 +01:00
David Bramwell
8a3c4c6a7b box: add token renew function for jwt auth - Fixes #4901 2020-04-22 16:53:03 +01:00
Nick Craig-Wood
d22e6f5a96 cryptcheck: remove duplicated debug OK message 2020-04-22 11:33:48 +01:00
Nick Craig-Wood
1648c1a0f3 crypt: calculate hashes for uploads from local disk
Before this change crypt would not calculate hashes for files it was
uploading. This is because, in the general case, they have to be
downloaded, encrypted and hashed which is too resource intensive.

However this causes backends which need the hash first before
uploading (eg s3/b2 when uploading chunked files) not to have a hash
of the file. This causes cryptcheck to complain about missing hashes
on large files uploaded via s3/b2.

This change calculates hashes for the upload if the upload is coming
from a local filesystem. It does this by encrypting and hashing the
local file re-using the code used by cryptcheck. For a local disk this
is not a lot more intensive than calculating the hash.

See: https://forum.rclone.org/t/strange-output-for-cryptcheck/15437
Fixes: #2809
2020-04-22 11:33:48 +01:00
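A sketch of the general idea, with hypothetical names (`encryptReader`, `hashEncryptedLocal`): re-read the local source, run it through the same stream encryption the upload will use, and hash the ciphertext. The real implementation reuses cryptcheck's machinery rather than this code.

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// encryptReader stands in for the crypt backend's stream encryption; the
// real code would reuse the same transform the upload itself uses.
type encryptReader func(io.Reader) (io.Reader, error)

// hashEncryptedLocal re-reads a local file, encrypts the stream and hashes
// the ciphertext, which is what the destination will store. This is only
// cheap because the source is on local disk.
func hashEncryptedLocal(path string, enc encryptReader) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	r, err := enc(f)
	if err != nil {
		return "", err
	}
	h := md5.New()
	if _, err := io.Copy(h, r); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	// Identity "encryption" just to make the sketch runnable.
	identity := func(r io.Reader) (io.Reader, error) { return r, nil }
	fmt.Println(hashEncryptedLocal("/etc/hostname", identity))
}
```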
Nick Craig-Wood
44b1a591a8 crypt: get rid of the unused Cipher interface as it obfuscated the code 2020-04-22 11:33:48 +01:00
Nick Craig-Wood
bbb6f94377 fstest: create AssertTimeEqualWithPrecision from CheckTimeEqualWithPrecision 2020-04-22 11:33:00 +01:00
Nick Craig-Wood
3f654dac37 mount: map more rclone errors into file systems errors
This improves the error reporting, in particular for
fs.ErrorPermissionDenied which was being reported as an IO error.
2020-04-21 16:31:43 +01:00
Nick Craig-Wood
eed9c5738d vfs: factor the vfs cache into its own package 2020-04-20 10:42:33 +01:00
Nick Craig-Wood
fd39cbc193 vfstests: move functional tests from mountlib and make them work with VFS
The tests are now run for the mount commands and for the plain VFS.

This makes the tests much easier to debug when running with a VFS than
through a mount.
2020-04-20 10:42:33 +01:00
Nick Craig-Wood
b25f5eb0d1 serve sftp: use VFS utility functions instead of own copy 2020-04-19 15:40:55 +01:00
Nick Craig-Wood
0961763082 vfs: add utility methods to match os package 2020-04-19 15:40:55 +01:00
Nick Craig-Wood
49e5299a95 asyncreader: make ErrorStreamAbandoned public 2020-04-19 15:18:49 +01:00
Nick Craig-Wood
07908f3f54 vfs: bring open_tests.go generator back into line with the generated tests
In

54deb01f00 vfs: Make OpenFile and friends return EINVAL if O_RDONLY and O_TRUNC

The generated file open_test.go was edited directly without editing
the generator.

This commit brings the generator make_open_tests.go back into line
with that edit. It also makes it so `go generate` can be used to
regenerate the tests.
2020-04-19 15:18:49 +01:00
Nick Craig-Wood
fdada79ebf accounting: support WriterTo for less memory copying
This should reduce memory copying when the async buffer is in use and
improve speeds.
2020-04-19 15:18:49 +01:00
Nick Craig-Wood
7f15cc9556 operations: make ReOpen and NewReOpen public for re-use elsewhere 2020-04-19 15:18:49 +01:00
Nick Craig-Wood
cd3c699f28 lib/readers: factor ErrorReader from multiple sources 2020-04-19 15:18:49 +01:00
Nick Craig-Wood
36d2c46bcf local: factor PreAllocate and SetSparse to lib/file 2020-04-19 15:18:49 +01:00
Nick Craig-Wood
1f50b70919 vfs: consistently use f.Path() or f._path() in File logs to avoid deadlocks
Previously we were using f which calls f.String() which calls f.Path()
which can cause a deadlock if used carelessly.

This patch explicitly calls f.Path() or f._path() to bring attention
to the fact that there is a call to a method.
2020-04-19 15:16:43 +01:00
Nick Craig-Wood
19db0df639 vfs: stop reading Dir members from outside dir.go 2020-04-19 15:16:43 +01:00
Nick Craig-Wood
238f26cc90 vfs: stop reading File members from outside file.go
This also fixes locking for ReadFileHandle and WriteFileHandle
accessing File members
2020-04-19 15:16:43 +01:00
Nick Craig-Wood
268fcbb973 vfs: implement lock ordering between File and Dir to eliminate deadlocks
As part of this we take a copy of the directory path as calling
d.Path() violates the total locking order.

See the comment at the top of file.go for details
2020-04-19 15:16:43 +01:00
Nick Craig-Wood
1e4589db18 Add Martin Stone to contributors 2020-04-19 12:41:38 +01:00
Denis
31a1cc46b7 copyurl: add no-clobber flag - fixes #3950 2020-04-19 12:40:17 +01:00
Martin Stone
9d4b3580a5 docs: fix description of --quiet - fixes #4132 2020-04-16 18:12:17 +01:00
Nick Craig-Wood
b07bef2a6b Add Daven to contributors 2020-04-15 18:13:35 +01:00
Nick Craig-Wood
705e60d0ad Add Brandon Philips to contributors 2020-04-15 18:13:35 +01:00
Daven
4c258787b5 googlephotos: make the start year configurable - fixes #3630 2020-04-15 18:08:07 +01:00
Brandon Philips
58ea15078f Dockerfile: remove GOOS and GOARCH
GOOS and GOARCH being set like this makes it impossible to compile on
other archs.

For me GOARCH prevents compilation on my ARM machine.

For others GOOS will prevent building for Windows.

xref https://github.com/rclone/rclone/issues/4086
2020-04-15 17:09:51 +01:00
Dan Dascalescu
756d47fb50 copy: fix typo 2020-04-15 17:08:44 +01:00
Jon Fautley
53874bd8ee cmd: add --error-on-no-transfer option
This allows rclone to exit with a non-zero return code if no files are
transferred. This is useful when calling rclone as part of a workflow/script
pipeline as it allows the end user to stop processing if no files have been
transferred.

NB: Enabling this option will result in rclone exiting non-zero if there are no
transfers. Depending on how you're currently using rclone in your scripts,
this may break existing setups!
2020-04-15 17:06:40 +01:00
Nick Craig-Wood
e2bf91452a s3: report errors on bucket creation (mkdir) correctly
Before this fix errors on bucket creation were being silently
swallowed.

See: https://forum.rclone.org/t/rclone-with-brand-new-aws-account-for-s3/15590
2020-04-15 13:13:13 +01:00
Nick Craig-Wood
9eb17e4ade fs: fix typo in error message 2020-04-15 12:50:26 +01:00
Nick Craig-Wood
2c4aadb588 Revert "install.sh: create ~/.config/rclone directory"
This reverts commit d694bb30e5.

If it creates a config directory then it leaves it owned by root which
is very confusing for new users.
2020-04-11 18:40:46 +01:00
Nick Craig-Wood
424554bc85 fs: generalise machinery for putting extra values when using --use-json-log 2020-04-11 18:16:21 +01:00
reddi
12a208a880 fs: expand stats output for json log 2020-04-11 18:16:21 +01:00
Michał Matczuk
6893ce0bbf s3: do not resize buf on put to memBuf
This is handled by Pool implementation.
2020-04-11 16:35:48 +01:00
Michał Matczuk
399cf18013 s3: use single memory pool
Previously we had a map of pools for different chunk sizes.
In practice the mapping is not very useful and requires a lock.
Pools of a size other than ChunkSize can only happen when we have a huge file (over 10k * ChunkSize).
We would need to have a bunch of identically sized huge files.
In such a case ChunkSize should most likely be increased.

The mapping and its lock are replaced with a single initialised pool for ChunkSize; in other cases a pool is allocated and freed on a per-file basis.
2020-04-11 16:34:05 +01:00
buengese
64b5105edd jottacloud: implement cleanup 2020-04-11 16:42:25 +02:00
buengese
2c2f4a6a05 jottacloud: implement --jottacloud-trashed-only 2020-04-11 16:42:25 +02:00
Nick Craig-Wood
da41db4712 vfs,mount,cmount: report 1PB free for unknown disk sizes
Factor the logic into the VFS layer so we don't have to duplicate it
into mount and cmount.

See: https://forum.rclone.org/t/rclone-mount-question/15454/
2020-04-11 13:31:10 +01:00
Nick Craig-Wood
9f3449d944 Add Michael G to contributors 2020-04-11 13:29:13 +01:00
Michael G
ec8a884787 doc: Clarify 'key' option for host key on serve sftp
The option --key would set the sftp host key. It could be mistaken for a default-user-key. Instead, explicitly call it 'host key' to avoid confusion.
2020-04-10 15:23:58 +01:00
Nick Craig-Wood
fc663d98d1 New email for Ankur Gupta in contributors 2020-04-03 11:51:08 +01:00
Ankur Gupta
08c2cb784f filter: Added --files-from-raw flag
--files-from parses input files by ignoring comments starting with # and ;
and stripping whitespace from start and end of strings.

A --files-from-raw flag was added which reads every line from the file, ignoring
comment characters and not stripping whitespace, while maintaining
backwards compatibility.

Fixes #3762
2020-04-03 10:36:24 +01:00
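A small sketch of the difference between the two modes, assuming a hypothetical `readFileList` helper rather than rclone's actual filter code:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// readFileList shows the difference described above: in normal mode
// comment lines (# or ;) are dropped and whitespace is trimmed, in raw
// mode every line is taken verbatim.
func readFileList(input string, raw bool) []string {
	var out []string
	sc := bufio.NewScanner(strings.NewReader(input))
	for sc.Scan() {
		line := sc.Text()
		if raw {
			out = append(out, line)
			continue
		}
		line = strings.TrimSpace(line)
		if line == "" || strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";") {
			continue
		}
		out = append(out, line)
	}
	return out
}

func main() {
	input := "# a comment\n  file with spaces  \n;another comment\n"
	fmt.Printf("%q\n", readFileList(input, false)) // ["file with spaces"]
	fmt.Printf("%q\n", readFileList(input, true))  // all three lines, untouched
}
```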
Nick Craig-Wood
3911a49256 vfs: make File lock and Dir lock not overlap to avoid deadlock
This was caused by this commit which wasn't part of 1.51.0

3c91abce74 vfs: fix race condition caused by unlocked reading of Dir.path
2020-04-02 21:14:45 +01:00
Nick Craig-Wood
2a62471e4c Add Jack Anderson to contributors 2020-03-31 18:17:36 +01:00
Jack Anderson
815ae7df45 backend/s3: add SSE-C support for AWS, Ceph, and MinIO 2020-03-31 18:16:45 +01:00
Nick Craig-Wood
ff0a299bfb drive: don't delete files with multiple parents to avoid data loss
Rclone can't safely delete files with multiple parents without
PATCHing the parents list. This can be done, but since multiple
parents are going away to be replaced by drive shortcuts we return an
error for now.

See #4013
2020-03-31 17:28:32 +01:00
Nick Craig-Wood
5fa6a28f70 dedupe: Stop dedupe deleting files with identical IDs #4013
Before this change if there were two files with the same name and the
same ID in the same directory, dedupe would delete one of them but
since these are actually the same file (with the same ID) then both
files would be deleted leading to data loss.

This should never actually happen, however it did happen as part of a
bug introduced in rclone which was fixed by

dfc7215bf9 drive: fix duplicate items when using --drive-shared-with-me #4018

This change checks to see if any of the duplicates have the same ID
and if they do it refuses to delete them.
2020-03-31 17:28:26 +01:00
Nick Craig-Wood
9a5178be7a test_all: optionally run Cleanup before cleaning a remote
Add this to the QingStor remote to stop it running out of buckets
2020-03-31 15:56:58 +01:00
Nick Craig-Wood
66e08e0cc8 mount: warn if --allow-non-empty used on Windows and clarify docs 2020-03-31 12:16:03 +01:00
Nick Craig-Wood
b5f78cd7b4 b2: ignore directory markers at the root also
See: https://forum.rclone.org/t/issue-with-lsf-r-files-only-first-line-is-blank/15229/
2020-03-31 11:46:17 +01:00
Nick Craig-Wood
ef99ca68aa gcs: ignore directory markers at the root also
See: https://forum.rclone.org/t/issue-with-lsf-r-files-only-first-line-is-blank/15229/
2020-03-31 11:46:10 +01:00
Nick Craig-Wood
a5c2f2c138 s3: ignore directory markers at the root also
See: https://forum.rclone.org/t/issue-with-lsf-r-files-only-first-line-is-blank/15229/
2020-03-31 11:45:52 +01:00
Nick Craig-Wood
b2c9ef23fa sync: make --track-renames tests only check rename count if expecting renames 2020-03-31 10:58:49 +01:00
Nick Craig-Wood
5f9be3dd05 sync: make --track-renames tests less fragile by using rename stat
Before this change these tests attempted to measure transfers and
checks with a very complicated heuristic, in lieu of having a rename
statistic.

The change switches over to using the rename statistic which should be
100% reliable.
2020-03-30 18:30:33 +01:00
Nick Craig-Wood
b5f1bebc52 fs: add renames statistic for file and directory renames 2020-03-30 18:22:28 +01:00
Nick Craig-Wood
ad9c7ff7ed sync: Fix incorrect "nothing to transfer" message using --delete-before
Before this change the first pass of --delete-before would output
"There was nothing to transfer" and then proceed to transfer things.

This makes sure the message isn't printed in the delete phase.

See: https://forum.rclone.org/t/incorrect-debug-output/15267
2020-03-30 16:45:02 +01:00
Nick Craig-Wood
1af9fcbbfa Add Samantha McVey to contributors 2020-03-28 18:33:34 +00:00
Samantha McVey
6765303de4 docs: unmystify how crypt stores encryption password in config
Without explaining exactly how this is generated, it can be confusing
and worrying to not know how the password that encrypts your data is
stored.

This also brings peace of mind to the user that even though
the same password is obscured differently each time, all the data to
get back to the original password remains. Explaining how it works
is much better than the reader of the documentation having to trust
a blackboxy/magical mechanism.
2020-03-26 17:14:45 +00:00
Nick Craig-Wood
304ee97944 Add harry to contributors 2020-03-26 16:23:46 +00:00
harry
d91a547d59 dropbox: make error insufficient space to be fatal 2020-03-26 16:19:50 +00:00
harry
7d9ca3998e drive: Extend --drive-stop-on-upload-limit to respond to teamDriveFileLimitExceeded.
Fixes #3979
2020-03-26 16:19:50 +00:00
harry
9aa32bc269 onedrive: make error quotaLimitReached to be fatal - Fixes #4089 2020-03-26 16:19:50 +00:00
Nick Craig-Wood
d9c8c47e02 onedrive: add missing drive on config - fixes #4068
Before this change we queried /me/drives for a list of the user's
drives and asked the user to choose. Sometimes this does not return
the user's main drive for reasons unknown.

After this change we query /me/drives first then /me/drive and add
that to the list of drives if it wasn't already there.
2020-03-24 08:44:10 +00:00
Nick Craig-Wood
243a868a5b touch: add --localtime flag to make --timestamp localtime not UTC
Fixes #4067
2020-03-23 17:12:56 +00:00
Nick Craig-Wood
6c351c15f8 Add Mark Spieth to contributors 2020-03-23 17:12:56 +00:00
Mark Spieth
45b63e2d45 oauth: Use custom http client so that --no-check-certificate is honored by oauth token fetch
Fixes #4085
2020-03-22 12:28:19 +00:00
Nick Craig-Wood
32df634cb6 sync: fix --track-renames-strategy modtime test on remotes which don't support modtime 2020-03-22 11:52:40 +00:00
Nick Craig-Wood
e569977c06 Add @Max-Sum as the union backend maintainer 2020-03-21 18:16:42 +00:00
Nick Craig-Wood
b49ab9f9c7 Add Max Sum to contributors 2020-03-21 18:13:05 +00:00
Max Sum
78a9e7440a union: Implement multiple writable remotes 2020-03-21 18:11:24 +00:00
Nick Craig-Wood
93f5125f51 sync: fix --track-renames-strategy tests
This commit corrects the logic for --track-renames-strategy which
broke the integration tests.

It also improves the parsing of the argument and adds a test for that.
2020-03-21 17:39:51 +00:00
Nick Craig-Wood
410e17a2ec Add Elan Ruusamäe to contributors 2020-03-21 17:39:51 +00:00
Elan Ruusamäe
23f7943645 Dropbox: Add info about required redirect url 2020-03-20 14:51:53 +00:00
Nick Craig-Wood
1108895180 Add Bernd Schoolmann to contributors 2020-03-20 14:01:02 +00:00
Bernd Schoolmann
158870bcdb fs: Add --track-renames-strategy for configurable matching criteria for --track-renames
This commit adds the `--track-renames-strategy` flag which allows the
user to choose the strategy for tracking renames when using the
`--track-renames` flag.

This can be "hash" or "modtime" or both currently.

This, when used with `--track-renames-strategy modtime` enables
support for tracking renames in encrypted remotes.

Fixes #3696
Fixes #2721
2020-03-20 13:04:56 +00:00
Nick Craig-Wood
36717c7d98 serve restic: fix tests after restic project removed vendoring 2020-03-18 16:50:01 +00:00
Fernando
1d3987bbbd docs: Fix typo and reword contact.md 2020-03-18 14:21:58 +00:00
Nick Craig-Wood
472d4799d1 qingstor: make rclone cleanup remove pending multipart uploads older than 24h 2020-03-18 12:49:21 +00:00
Nick Craig-Wood
84caf1e158 qingstor: try harder to cancel failed multipart uploads 2020-03-18 12:49:21 +00:00
Nick Craig-Wood
72eba4dbb6 Add greatroar to contributors 2020-03-18 12:49:21 +00:00
greatroar
0f20f23651 cache: move methods used for testing into test file 2020-03-16 18:41:32 +00:00
Nick Craig-Wood
47e2d5c415 config: fsync the config file after writing #3411
This should help with data integrity
2020-03-16 18:20:16 +00:00
Nick Craig-Wood
1e9b8e043a Add fishbullet to contributors 2020-03-16 17:17:08 +00:00
fishbullet
eb0fc21533 fs: filter flags ability to read from stdin - fixes #4034 2020-03-16 17:16:50 +00:00
Lars Lehtonen
a6a2eec392 backend/b2: remove unused largeUpload.clearUploadURL() 2020-03-16 17:11:19 +00:00
Nick Craig-Wood
c227a90b52 sync: implement --order-by xxx,mixed 2020-03-16 15:50:04 +00:00
Nick Craig-Wood
1e3d899db8 sync: replace container/heap with github.com/aalpar/deheap 2020-03-16 15:50:04 +00:00
Nick Craig-Wood
84369286df vendor: add github.com/aalpar/deheap 2020-03-16 15:50:04 +00:00
Nick Craig-Wood
4c82b1f3c6 operations: fix --max-transfer test with jottacloud
Jottacloud was deduplicating the uploads, so make a different upload
each time
2020-03-16 14:05:49 +00:00
Nick Craig-Wood
f94257115f operations: skip part of the --max-transfer test under chunker
This test relies on there being 1 file copied and chunker copies several
2020-03-16 14:05:05 +00:00
Nick Craig-Wood
77e94be280 onedrive: implement --onedrive-server-side-across-configs - fixes #4058 2020-03-15 21:10:23 +00:00
Nick Craig-Wood
37d5e75a56 operations: fix --max-transfer test to have a higher threshold
Before this change backends which introduce overhead (eg crypt) were
failing to upload the first file.

This change increases the threshold to 2k to allow the first file to
go through even with some overhead but the next file to definitely
fail.
2020-03-15 11:13:27 +00:00
Nick Craig-Wood
dc06973796 s3: use rclone's low level retries instead of AWS SDK to fix listing retries
In 5470d34740 "backend/s3: use low-level-retries as the number
of SDK retries" we switched over to using the AWS SDK low level
retries instead of rclone's low level retry logic.

This had the unfortunate effect that retrying listings to correct XML
Syntax errors failed on non S3 backends such as CEPH. The AWS SDK was
also retrying the XML Syntax error request which doesn't make sense.

This change turns off the AWS SDK retries in favour of just using
rclone's retry logic.
2020-03-14 18:04:24 +00:00
Nick Craig-Wood
b03462ab04 Add Patryk Jakuszew to contributors 2020-03-13 21:45:09 +00:00
Patryk Jakuszew
d4e87a841d fs/log: add support for syslog LOCAL facilities - fixes #4061 2020-03-13 21:44:52 +00:00
Nick Craig-Wood
6d0063d685 operations: Make --max-transfer more accurate
Before this change we checked the transfer was out of range only
before the Read call. This means that we returned all the data to the
reader before declaring an error. This means that some backends wrote
the file even though an error was returned.

This fix checks the transfer after the Read as well, and chops the
excess characters off the read data if we are over the limit so that
we don't ever deliver all the data.

This fixes the tests introduced as part of 6f1766dd9e and #2672
on backends other than local.
2020-03-13 16:40:38 +00:00
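A minimal sketch of the approach, using a hypothetical `limitedReader` rather than rclone's accounting code: the limit is checked after the Read as well, and the returned slice is truncated so the excess bytes are never delivered.

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// errMaxTransfer is a stand-in for rclone's max transfer limit error.
var errMaxTransfer = fmt.Errorf("max transfer limit reached")

// limitedReader checks the limit after the Read as well and truncates
// the returned data so the excess is never delivered to the writer.
type limitedReader struct {
	r     io.Reader
	limit int64
	read  int64
}

func (l *limitedReader) Read(p []byte) (int, error) {
	if l.read >= l.limit {
		return 0, errMaxTransfer
	}
	n, err := l.r.Read(p)
	l.read += int64(n)
	if l.read > l.limit {
		// Chop off the bytes beyond the limit before returning.
		n -= int(l.read - l.limit)
		l.read = l.limit
		return n, errMaxTransfer
	}
	return n, err
}

func main() {
	lr := &limitedReader{r: strings.NewReader("0123456789"), limit: 4}
	b, err := io.ReadAll(lr)
	fmt.Printf("%q %v\n", b, err) // "0123" max transfer limit reached
}
```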
Nick Craig-Wood
6fdd7149c1 drive: don't overwrite the description on server side copy
See: https://forum.rclone.org/t/is-there-a-way-to-sync-while-keeping-file-description-on-the-destination/14609
2020-03-12 10:39:00 +00:00
Harry
fdb07f2f89 onedrive: Added maximum chunk size limit warning in the docs
If the chunk size is more than 250M (262,144,000 bytes) then the API throws the following error:

Microsoft.SharePoint.Client.InvalidClientQueryException: The request message is too big. The server does not allow messages larger than 262144000 bytes.
2020-03-10 15:14:08 +00:00
Nick Craig-Wood
a433698b00 Add Joachim Brandon LeBlanc to contributors 2020-03-10 12:00:30 +00:00
Anuar Serdaliyev
f14871caf7 accounting: Correct exitcode on Transfer Limit Exceeded flag. Fixes #3203
Before this change the exit code for transfer limit exceeded was
incorrect. This was because the `resolveExitCode` function unwraps the
error thus reading the underlying error which is not the same as the
error it was comparing to (`ErrorMaxTransferLimitReached`).

This change fixes it by splitting the error definition in two so that
when the Fatal error is unwrapped we match against
`ErrorMaxTransferLimitReached` however when we return the error we
return `ErrorMaxTransferLimitReachedFatal`.
2020-03-10 12:00:10 +00:00
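A sketch of the two-level error split described above, with hypothetical names and exit codes; the point is that unwrapping via `errors.Is` still matches the plain sentinel even though the fatal wrapper is what actually gets returned.

```go
package main

import (
	"errors"
	"fmt"
)

// A plain sentinel that callers match against, and a fatal wrapper
// that is what actually gets returned up the call stack.
var (
	errMaxTransferLimitReached      = errors.New("max transfer limit reached")
	errMaxTransferLimitReachedFatal = fmt.Errorf("fatal: %w", errMaxTransferLimitReached)
)

// resolveExitCode unwraps the error it is given, so comparing against the
// unwrapped sentinel (rather than the fatal wrapper) gives the right code.
func resolveExitCode(err error) int {
	switch {
	case err == nil:
		return 0
	case errors.Is(err, errMaxTransferLimitReached):
		return 8 // hypothetical "transfer limit exceeded" exit code
	default:
		return 1
	}
}

func main() {
	fmt.Println(resolveExitCode(errMaxTransferLimitReachedFatal)) // 8
}
```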
Joachim Brandon LeBlanc
132ce94139 backend/s3: use the provided size parameter when allocating a new memory pool - fixes #4047 (#4049) 2020-03-09 16:56:21 +00:00
Nick Craig-Wood
a492c0fb0e local: speed up multi thread downloads by using sparse files on Windows
Before this change rclone didn't use sparse files on Windows. This
means that when you downloaded a file with multithread download it
wrote the entire file with zeros first if the first write was not at the
start of the file.

This change makes the file be sparse on Windows. Linux/macOS files
were already sparse.
2020-03-09 10:55:52 +00:00
Nick Craig-Wood
dfc7215bf9 drive: fix duplicate items when using --drive-shared-with-me #4018
Before this change shared with me items with multiple parents (ie most
of them that aren't in the root) would appear twice in the directory
listings.

This fixes the problem by doing an early exit for shared with me
items.
2020-03-07 16:46:53 +00:00
Nick Craig-Wood
38e59ebdf3 drive: fix missing files when using --fast-list and --drive-shared-with-me
This bug was introduced here by removing some necessary code detecting
shared with me items at the root with no parents.

4453fa4ba6 "drive: fix --fast-list when using appDataFolder"

This fix reverts that part of the patch.

Fixes #4018
2020-03-07 16:46:53 +00:00
Yves G
5ee24f804f webdav: report full and consistent usage with about
— allow either Used or Available to be ==0 (remote full or empty)
— compute Total if both values are received
2020-03-05 15:10:19 +00:00
Nick Craig-Wood
747edf42c1 azureblob: document container level SAS URL from root now needs container
In 8a0775ce3c which was released in v1.49.0 we inadvertently
stopped SAS URLs working from the root without a container name.

Previously to this change you could use `rclone mount azsas:` and it
would actually be equivalent to `rclone mount azsas:container`. After
this change, only `rclone mount azsas:container` will work, `rclone
mount azsas:` will have a directory in the root called "container".

After some discussion it was decided not to revert this change as the
current behaviour is more logical and in line with the similar
behaviour for the b2 backend.

Instead the documentation was updated to show exactly how container
level SAS URLs behave.

Fixes #4028
2020-03-05 14:56:36 +00:00
Nick Craig-Wood
ce23cb2093 Add evileye to contributors 2020-03-05 14:07:32 +00:00
evileye
6ff0bb825e mount: fix fail because of too long volume name - fixes #4026 2020-03-05 13:57:20 +00:00
Lars Lehtonen
fef2c6bf7a backend/s3: replace deprecated session.New() with session.NewSession() 2020-03-05 11:34:10 +00:00
Ishuah Kariuki
0c6f14c694 copy/sync: only create empty directories when they don't exist on the remote
Sync/copy now only creates empty directories when they don't exist on the remote (--create-empty-src-dirs flag) - fixes #2800
2020-03-03 16:24:22 +00:00
Nick Craig-Wood
1c800efbac Add Robert-André Mauchin to contributors 2020-03-03 12:41:08 +00:00
Robert-André Mauchin
e2e400e63c Use proper import path go.etcd.io/bbolt
Signed-off-by: Robert-André Mauchin <zebob.m@gmail.com>
2020-03-03 12:40:52 +00:00
Nick Craig-Wood
4d8d1e287b googlephotos: fix "concurrent map write" error - fixes #4003
This adds a bit of missed locking around the uploaded info to fix the
concurrent map write.

All the other accesses have locking - this one must have got missed.
2020-03-02 18:12:46 +00:00
Nick Craig-Wood
452fdbf1c1 Add Franklyn Tackitt to contributors 2020-03-02 17:31:23 +00:00
Nick Craig-Wood
51686bd1ef Add Shing Kit Chan to contributors 2020-03-02 17:31:23 +00:00
Gary Kim
38a4d50e73 rcd: Add Prometheus metrics support - fixes #3858
Signed-off-by: Gary Kim <gary@garykim.dev>
2020-03-01 09:58:34 +00:00
Gary Kim
3fd38cbe8d vendor: add github.com/prometheus/client_golang/prometheus
Signed-off-by: Gary Kim <gary@garykim.dev>
2020-03-01 09:58:34 +00:00
Franklyn Tackitt
2b3d13a841 fs: Use --cutoff-mode hard,soft,cautious instead of 3 --max-transfer-mode flags
Fixes #2672
2020-03-01 09:49:55 +00:00
Shing Kit Chan
6f1766dd9e fs: Add support for --max-transfer-cutoff modes #2672
This also adds a max transfer cutoff check for server side copies too
2020-03-01 09:49:55 +00:00
Nick Craig-Wood
7d70eb0346 ftp: attempt to work-around pureftp sending spurious 150 messages
pureftpd has a bug where it sends messages like this

```
    150-Accepted data connection\r\n
        Response code: File status okay; about to open data connection (150)
        Response arg: Accepted data connection
    150 32768.0 kbytes to download\r\n
    150 0.014 seconds (measured here), 1665.27 Mbytes per second\r\n
```

The last `150` is treated as a new response - the previous `150` should have been `150-`.

This means that rclone sees the `150 0.014 seconds (measured here),
1665.27 Mbytes per second` as a reply to the next message and reports
it as an error.

This fix ignores that specific message when it is received in the
`Close` method. It dumps the FTP connection after as it is out of
sync.

See: #3984
Fixes #3445
2020-03-01 09:17:51 +00:00
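A sketch of the check the work-around implies, with a hypothetical `isSpuriousTransferStat` helper rather than the actual FTP client code:

```go
package main

import (
	"fmt"
	"strings"
)

// isSpuriousTransferStat recognises pureftpd's trailing
// "150 ... seconds (measured here) ..." line, which is not a real reply
// to the next command and so should be dropped in Close; the connection
// is then discarded because it is out of sync.
func isSpuriousTransferStat(code int, msg string) bool {
	return code == 150 && strings.Contains(msg, "seconds (measured here)")
}

func main() {
	fmt.Println(isSpuriousTransferStat(150, "0.014 seconds (measured here), 1665.27 Mbytes per second")) // true
	fmt.Println(isSpuriousTransferStat(226, "Transfer complete"))                                        // false
}
```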
Nick Craig-Wood
bae2644667 Add Yves G to contributors 2020-03-01 09:17:51 +00:00
Valeriy.Vyrva
f6f95822c1 doc: fix links in generated documentation 2020-03-01 09:14:37 +00:00
Yves G
b1b5e09081 vfs: make df output more consistent on a rclone mount.
When 2 values are known among vfs:{free,used,total}, compute the 3rd
2020-03-01 08:54:07 +00:00
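A tiny sketch of the rule, with a hypothetical `diskUsage` type: when two of total/used/free are known, the third is derived so the reported numbers stay consistent.

```go
package main

import "fmt"

// diskUsage derives the missing value when two of total/used/free are
// known, so df output stays consistent.
type diskUsage struct {
	total, used, free int64 // -1 means unknown
}

func (d *diskUsage) fill() {
	switch {
	case d.total < 0 && d.used >= 0 && d.free >= 0:
		d.total = d.used + d.free
	case d.used < 0 && d.total >= 0 && d.free >= 0:
		d.used = d.total - d.free
	case d.free < 0 && d.total >= 0 && d.used >= 0:
		d.free = d.total - d.used
	}
}

func main() {
	d := diskUsage{total: -1, used: 40, free: 60}
	d.fill()
	fmt.Printf("%+v\n", d) // {total:100 used:40 free:60}
}
```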
979 changed files with 131638 additions and 1983 deletions


@@ -5,7 +5,7 @@ WORKDIR /go/src/github.com/rclone/rclone/
RUN make quicktest
RUN \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
CGO_ENABLED=0 \
make
RUN ./rclone version


@@ -2,17 +2,20 @@
Current active maintainers of rclone are:
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | tardigrade backend |
**This is a work in progress Draft**


@@ -59,9 +59,11 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)


@@ -31,10 +31,12 @@ import (
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex"


@@ -9,7 +9,6 @@ import (
"context"
"crypto/md5"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
@@ -35,6 +34,9 @@ import (
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/sync/errgroup"
)
const (
@@ -59,6 +61,8 @@ const (
emulatorAccount = "devstoreaccount1"
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
// Register with Fs
@@ -125,6 +129,28 @@ If blobs are in "archive tier" at remote, trying to perform data transfer
operations from remote will not be allowed. User should first restore by
tiering blob to "Hot" or "Cool".`,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Don't store MD5 checksum with object metadata.
Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Default: false,
Advanced: true,
}, {
Name: "memory_pool_flush_time",
Default: memoryPoolFlushTime,
Advanced: true,
Help: `How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
Name: "memory_pool_use_mmap",
Default: memoryPoolUseMmap,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -141,16 +167,19 @@ tiering blob to "Hot" or "Cool".`,
// Options defines the configuration for this backend
type Options struct {
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
UseEmulator bool `config:"use_emulator"`
Enc encoder.MultiEncoder `config:"encoding"`
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
UseEmulator bool `config:"use_emulator"`
DisableCheckSum bool `config:"disable_checksum"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote azure server
@@ -169,6 +198,7 @@ type Fs struct {
cache *bucket.Cache // cache for container creation status
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
}
// Object describes a azure object
@@ -257,6 +287,12 @@ var retryErrorCodes = []int{
func (f *Fs) shouldRetry(err error) (bool, error) {
// FIXME interpret special errors - more to do here
if storageErr, ok := err.(azblob.StorageError); ok {
switch storageErr.ServiceCode() {
case "InvalidBlobOrBlock":
// These errors happen sometimes in multipart uploads
// because of block concurrency issues
return true, err
}
statusCode := storageErr.Response().StatusCode
for _, e := range retryErrorCodes {
if statusCode == e {
@@ -382,6 +418,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
client: fshttp.NewClient(fs.Config),
cache: bucket.NewCache(),
cntURLcache: make(map[string]*azblob.ContainerURL, 1),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
fs.Config.Transfers,
opt.MemoryPoolUseMmap,
),
}
f.setRoot(root)
f.features = (&fs.Features{
@@ -816,6 +858,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return fs, fs.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
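
PutStream simply delegates to Put; the caller signals the unknown length by passing -1 as the size in the ObjectInfo, and Update (further down in this file) then forces the multipart path. A minimal sketch of driving that from outside the backend, using the fs.PutStreamer interface and object.NewStaticObjectInfo as seen elsewhere in this change set; the function and remote names are illustrative only:

package example

import (
	"context"
	"errors"
	"io"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/object"
)

// streamUpload pushes a stream of unknown length to any backend that
// advertises PutStream. The -1 size is what makes the azureblob Update
// path pick the streaming multipart route.
func streamUpload(ctx context.Context, f fs.Fs, remote string, in io.Reader) (fs.Object, error) {
	src := object.NewStaticObjectInfo(remote, time.Now(), -1, true, nil, nil)
	putStreamer, ok := f.(fs.PutStreamer)
	if !ok {
		return nil, errors.New("backend does not support streaming uploads")
	}
	return putStreamer.PutStream(ctx, in, src)
}
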
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
container, _ := f.split(dir)
@@ -825,6 +872,10 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
return f.cache.Create(container, func() error {
// If this is a SAS URL limited to a container then assume it is already created
if f.isLimited {
return nil
}
// now try to create the container
return f.pacer.Call(func() (bool, error) {
_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
@@ -978,6 +1029,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return f.NewObject(ctx, remote)
}
func (f *Fs) getMemoryPool(size int64) *pool.Pool {
if size == int64(f.opt.ChunkSize) {
return f.pool
}
return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(size),
fs.Config.Transfers,
f.opt.MemoryPoolUseMmap,
)
}
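
getMemoryPool returns the shared pool when the requested buffer size matches ChunkSize and builds a throwaway pool otherwise. For reference, a standalone sketch of the Get/Put pairing that the upload loop below wraps in free(); the sizes are illustrative and the parameter meanings are inferred from the pool.New calls above:

package example

import (
	"time"

	"github.com/rclone/rclone/lib/pool"
)

// poolSketch borrows one buffer from a lib/pool buffer pool and hands
// it back, mirroring the memPool.Get()/memPool.Put(buf) pairing used
// for each uploaded part.
func poolSketch() {
	bufPool := pool.New(
		time.Minute, // drop idle buffers after this long
		4*1024*1024, // every buffer is one chunk (4 MiB here)
		8,           // buffers kept around (the code above passes fs.Config.Transfers)
		false,       // false = plain allocation, true = mmap backed buffers
	)
	buf := bufPool.Get() // borrow a chunk-sized buffer
	copy(buf, "part data")
	bufPool.Put(buf) // return it for reuse once the part has been uploaded
}
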
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -1221,103 +1285,116 @@ type readSeeker struct {
io.Seeker
}
// increment the slice passed in as LSB binary
func increment(xs []byte) {
for i, digit := range xs {
newDigit := digit + 1
xs[i] = newDigit
if newDigit >= digit {
// exit if no carry
break
}
}
}
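
increment is used below to generate the binary block IDs which are then base64 encoded; because every ID is an 8 byte little-endian counter, all block IDs have the same encoded length, which the block API requires. A standalone illustration of the scheme (increment is copied from the backend code above so the example runs on its own):

package main

import (
	"encoding/base64"
	"fmt"
)

// increment treats the slice as a little-endian counter, exactly as in
// the backend code above.
func increment(xs []byte) {
	for i, digit := range xs {
		newDigit := digit + 1
		xs[i] = newDigit
		if newDigit >= digit {
			break // no carry, stop
		}
	}
}

func main() {
	// All block IDs must share a length, so an 8 byte counter is used
	// and then base64 encoded, mirroring uploadMultipart below.
	id := make([]byte, 8)
	for part := 0; part < 3; part++ {
		increment(id)
		fmt.Println(base64.StdEncoding.EncodeToString(id))
	}
}
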
var warnStreamUpload sync.Once
// uploadMultipart uploads a file using multipart upload
//
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
func (o *Object) uploadMultipart(in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
// Calculate correct chunkSize
chunkSize := int64(o.fs.opt.ChunkSize)
var totalParts int64
for {
// Calculate number of parts
var remainder int64
totalParts, remainder = size/chunkSize, size%chunkSize
if remainder != 0 {
totalParts++
totalParts := -1
// Note that the max size of file is 4.75 TB (100 MB X 50,000
// blocks) and this is smaller than the max uncommitted block
// size (9.52 TB) so we do not need to part commit block lists
// or garbage collect uncommitted blocks.
//
// See: https://docs.microsoft.com/en-gb/rest/api/storageservices/put-block
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 4MB). With a maximum number of parts (50,000) this gives a maximum file size
// of 195GB, which seems a reasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
o.fs.opt.ChunkSize, fs.SizeSuffix(chunkSize*maxTotalParts))
})
} else {
// Adjust partSize until the number of parts is small enough.
if size/chunkSize >= maxTotalParts {
// Calculate partition size rounded up to the nearest MB
chunkSize = (((size / maxTotalParts) >> 20) + 1) << 20
}
if totalParts < maxTotalParts {
break
}
// Double chunk size if the number of parts is too big
chunkSize *= 2
if chunkSize > int64(maxChunkSize) {
return errors.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), totalParts, fs.SizeSuffix(chunkSize/2))
}
totalParts = int(size / chunkSize)
if size%chunkSize != 0 {
totalParts++
}
}
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))
// https://godoc.org/github.com/Azure/azure-storage-blob-go/2017-07-29/azblob#example-BlockBlobURL
// Utilities are cloned from above example
// These helper functions convert a binary block ID to a base-64 string and vice versa
// NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the block must be the same length
blockIDBinaryToBase64 := func(blockID []byte) string { return base64.StdEncoding.EncodeToString(blockID) }
// These helper functions convert an int block ID to a base-64 string and vice versa
blockIDIntToBase64 := func(blockID uint64) string {
binaryBlockID := (&[8]byte{})[:] // All block IDs are 8 bytes long
binary.LittleEndian.PutUint64(binaryBlockID, blockID)
return blockIDBinaryToBase64(binaryBlockID)
}
// block ID variables
var (
rawID uint64
blockID = "" // id in base64 encoded form
blocks []string
)
// increment the blockID
nextID := func() {
rawID++
blockID = blockIDIntToBase64(rawID)
blocks = append(blocks, blockID)
}
// Get BlockBlobURL, we will use default pipeline here
blockBlobURL := blob.ToBlockBlobURL()
ctx := context.Background()
ac := azblob.LeaseAccessConditions{} // Use default lease access conditions
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)
// Upload the chunks
remaining := size
position := int64(0)
errs := make(chan error, 1)
var wg sync.WaitGroup
outer:
for part := 0; part < int(totalParts); part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = size // remaining size in file for logging only, -1 if size < 0
position = int64(0) // position in file
memPool = o.fs.getMemoryPool(chunkSize) // pool to get memory from
finished = false // set when we have read EOF
blocks []string // list of blocks for finalize
blockBlobURL = blob.ToBlockBlobURL() // Get BlockBlobURL, we will use default pipeline here
ac = azblob.LeaseAccessConditions{} // Use default lease access conditions
binaryBlockID = make([]byte, 8) // block counter as LSB first 8 bytes
)
for part := 0; !finished; part++ {
// Get a block of memory from the pool and a token which limits concurrency
o.fs.uploadToken.Get()
buf := memPool.Get()
free := func() {
memPool.Put(buf) // return the buf
o.fs.uploadToken.Put() // return the token
}
reqSize := remaining
if reqSize >= chunkSize {
reqSize = chunkSize
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
free()
break
}
// Make a block of memory
buf := make([]byte, reqSize)
// Read the chunk
_, err = io.ReadFull(in, buf)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to read source")
break outer
n, err := readers.ReadFill(in, buf) // this can never return 0, nil
if err == io.EOF {
if n == 0 { // end if no data
free()
break
}
finished = true
} else if err != nil {
free()
return errors.Wrap(err, "multipart upload failed to read source")
}
buf = buf[:n]
// increment the blockID and save the blocks for finalize
increment(binaryBlockID)
blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
blocks = append(blocks, blockID)
// Transfer the chunk
nextID()
wg.Add(1)
o.fs.uploadToken.Get()
go func(part int, position int64, blockID string) {
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
g.Go(func() (err error) {
defer free()
// Upload the block, with MD5 for check
md5sum := md5.Sum(buf)
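
The rewritten loop pairs an errgroup (for error propagation and cancellation via gCtx) with the uploadToken dispenser and the memory pool, both released in free(), to bound how much work is in flight. A minimal standalone sketch of that shape, with a buffered channel standing in for the pacer.TokenDispenser:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// uploadParts mirrors the structure above: an errgroup propagates the
// first error and cancels gCtx, while a buffered channel caps how many
// parts are uploaded at once.
func uploadParts(ctx context.Context, parts [][]byte, upload func(context.Context, int, []byte) error) error {
	g, gCtx := errgroup.WithContext(ctx)
	tokens := make(chan struct{}, 4) // at most 4 in-flight parts
	for i, part := range parts {
		tokens <- struct{}{} // acquire a concurrency token
		if gCtx.Err() != nil {
			<-tokens
			break // an earlier part failed, stop queueing new ones
		}
		i, part := i, part // capture loop variables for the goroutine
		g.Go(func() error {
			defer func() { <-tokens }() // release the token
			return upload(gCtx, i, part)
		})
	}
	return g.Wait()
}

func main() {
	parts := [][]byte{[]byte("a"), []byte("b"), []byte("c")}
	err := uploadParts(context.Background(), parts, func(ctx context.Context, i int, p []byte) error {
		fmt.Printf("uploaded part %d (%d bytes)\n", i, len(p))
		return nil
	})
	fmt.Println("err:", err)
}
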
@@ -1329,28 +1406,19 @@ outer:
_, err = blockBlobURL.StageBlock(ctx, blockID, &rs, ac, transactionalMD5)
return o.fs.shouldRetry(err)
})
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {
case errs <- err:
default:
}
return
return errors.Wrap(err, "multipart upload failed to upload part")
}
}(part, position, blockID)
return nil
})
// ready for next block
remaining -= chunkSize
if size >= 0 {
remaining -= chunkSize
}
position += chunkSize
}
wg.Wait()
if err == nil {
select {
case err = <-errs:
default:
}
}
err = g.Wait()
if err != nil {
return err
}
@@ -1389,12 +1457,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
// in order to validate its integrity during transport
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
httpHeaders.ContentMD5 = sourceMD5bytes
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
if !o.fs.opt.DisableCheckSum {
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
httpHeaders.ContentMD5 = sourceMD5bytes
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
}
}
}
@@ -1408,7 +1478,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// is merged the SDK can't upload a single blob of exactly the chunk
size, so upload with a multipart upload to work around.
// See: https://github.com/rclone/rclone/issues/2653
multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if size == int64(o.fs.opt.ChunkSize) {
multipartUpload = true
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
@@ -1418,7 +1488,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
if multipartUpload {
// If a large file upload in chunks
err = o.uploadMultipart(in, size, &blob, &httpHeaders)
err = o.uploadMultipart(ctx, in, size, &blob, &httpHeaders)
} else {
// Write a small blob in one transaction
blockBlobURL := blob.ToBlockBlobURL()
@@ -1503,12 +1573,13 @@ func (o *Object) GetTier() string {
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Purger = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Purger = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
)
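
The var block above is the usual compile-time interface check idiom: assigning a value to the blank identifier of the interface type makes the build fail if Fs or Object stops satisfying one of the listed interfaces, so adding fs.PutStreamer here locks in the new PutStream method at compile time. A tiny generic example of the same idiom:

package main

import "io"

// byteSource must keep satisfying io.Reader; if a refactor breaks that,
// the blank assignment below fails to compile rather than failing at runtime.
type byteSource struct{}

func (byteSource) Read(p []byte) (int, error) { return 0, io.EOF }

var _ io.Reader = byteSource{}

func main() {}
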

View File

@@ -16,3 +16,20 @@ func (f *Fs) InternalTest(t *testing.T) {
enabled = f.Features().GetTier
assert.True(t, enabled)
}
func TestIncrement(t *testing.T) {
for _, test := range []struct {
in []byte
want []byte
}{
{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
} {
increment(test.in)
assert.Equal(t, test.want, test.in)
}
}

View File

@@ -124,8 +124,13 @@ minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files`,
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files
Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Default: false,
Advanced: true,
}, {
@@ -668,7 +673,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
remote := file.Name[len(prefix):]
// Check for directory
isDirectory := strings.HasSuffix(remote, "/")
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if isDirectory {
remote = remote[:len(remote)-1]
}
@@ -1370,6 +1375,21 @@ func (o *Object) Size() int64 {
return o.size
}
// Clean the SHA1
//
// Make sure it is lower case
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (eg Cyberduck) use this
func cleanSHA1(sha1 string) (out string) {
out = strings.ToLower(sha1)
const unverified = "unverified:"
if strings.HasPrefix(out, unverified) {
out = out[len(unverified):]
}
return out
}
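
As an illustration of what cleanSHA1 normalises (the input value below is made up, and strings.TrimPrefix is used here for brevity in place of the HasPrefix check above):

package main

import (
	"fmt"
	"strings"
)

// cleanSHA1 lower-cases the hash and strips the "unverified:" prefix
// that some clients (eg Cyberduck) store, as in the backend code above.
func cleanSHA1(sha1 string) string {
	out := strings.ToLower(sha1)
	const unverified = "unverified:"
	return strings.TrimPrefix(out, unverified)
}

func main() {
	// hypothetical value as stored by another client
	fmt.Println(cleanSHA1("UNVERIFIED:3F786850E387550FDAB836ED7E6DC881DE23001B"))
	// prints 3f786850e387550fdab836ed7e6dc881de23001b
}
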
// decodeMetaDataRaw sets the metadata from the data passed in
//
// Sets
@@ -1385,12 +1405,7 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
if o.sha1 == "" || o.sha1 == "none" {
o.sha1 = Info[sha1Key]
}
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (eg Cyberduck) use this
const unverified = "unverified:"
if strings.HasPrefix(o.sha1, unverified) {
o.sha1 = o.sha1[len(unverified):]
}
o.sha1 = cleanSHA1(o.sha1)
o.size = Size
// Use the UploadTimestamp if can't get file info
o.modTime = time.Time(UploadTimestamp)
@@ -1648,6 +1663,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
o.sha1 = resp.Header.Get(sha1InfoHeader)
fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
}
o.sha1 = cleanSHA1(o.sha1)
}
// Don't check length or hash on partial content
if resp.StatusCode == http.StatusPartialContent {
@@ -1816,6 +1832,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
RootURL: upload.UploadURL,
Body: in,
Options: options,
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-File-Name": urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath)),

View File

@@ -184,13 +184,6 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock()
}
// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
up.uploadMu.Lock()
up.uploads = nil
up.uploadMu.Unlock()
}
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {

View File

@@ -53,8 +53,7 @@ const (
rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
rootID = "0" // ID of root folder is always this
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api.box.com/2.0"
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
@@ -89,22 +88,7 @@ func init() {
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
var err error
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
err = refreshJWTToken(jsonFile, boxSubType, name, m)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
@@ -121,6 +105,11 @@ func init() {
}, {
Name: config.ConfigClientSecret,
Help: "Box App Client Secret\nLeave blank normally.",
}, {
Name: "root_folder_id",
Help: "Fill in for rclone to use a non-root folder as its starting point.",
Default: "0",
Advanced: true,
}, {
Name: "box_config_file",
Help: "Box App config.json location\nLeave blank normally.",
@@ -163,6 +152,26 @@ func init() {
})
}
func refreshJWTToken(jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
return err
}
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
@@ -185,7 +194,6 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimS
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Iat: time.Now().Unix(),
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
@@ -236,6 +244,7 @@ type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CommitRetries int `config:"commit_retries"`
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
}
// Fs represents a remote box
@@ -393,13 +402,27 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
return err
})
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
// Get rootID
// If using box config.json and JWT, renewing should just refresh the token and
// should do so whether there are uploads pending or not.
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
err := refreshJWTToken(jsonFile, boxSubType, name, m)
return err
})
f.tokenRenewer.Start()
} else {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
return err
})
}
// Get rootFolderID
rootID := f.opt.RootFolderID
f.dirCache = dircache.New(root, rootID, f)
// Find the current root
@@ -1167,7 +1190,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
upload := api.UploadFile{
Name: o.fs.opt.Enc.FromStandardName(leaf),
ContentModifiedAt: api.Time(modTime),
@@ -1186,6 +1209,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
MultipartContentName: "contents",
MultipartFileName: upload.Name,
RootURL: uploadURL,
Options: options,
}
// If object has an ID then it is existing so create a new version
if o.id != "" {
@@ -1227,9 +1251,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Upload with simple or multipart
if size <= int64(o.fs.opt.UploadCutoff) {
err = o.upload(ctx, in, leaf, directoryID, modTime)
err = o.upload(ctx, in, leaf, directoryID, modTime, options...)
} else {
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime, options...)
}
return err
}

View File

@@ -54,7 +54,7 @@ func sha1Digest(digest []byte) string {
}
// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn, options ...fs.OpenOption) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk))
sha1sum := sha1.Sum(chunk)
opts := rest.Opts{
@@ -64,6 +64,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
ContentType: "application/octet-stream",
ContentLength: &chunkSize,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
Options: options,
ExtraHeaders: map[string]string{
"Digest": sha1Digest(sha1sum[:]),
},
@@ -171,7 +172,7 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time, options ...fs.OpenOption) (err error) {
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
if err != nil {
@@ -236,7 +237,7 @@ outer:
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {

View File

@@ -65,6 +65,7 @@ func init() {
Name: "cache",
Description: "Cache a remote",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
@@ -1882,6 +1883,31 @@ func (f *Fs) Disconnect(ctx context.Context) error {
return do(ctx)
}
var commandHelp = []fs.CommandHelp{
{
Name: "stats",
Short: "Print stats on the cache backend in JSON format.",
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "stats":
return f.Stats()
default:
return nil, fs.ErrorCommandNotFound
}
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1899,4 +1925,5 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
)

View File

@@ -16,10 +16,10 @@ import (
"sync"
"time"
bolt "github.com/etcd-io/bbolt"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
)
// Constants
@@ -980,15 +980,6 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
})
}
// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
// TO BE USED IN TESTING ONLY
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
item.Started = true
return nil
})
}
// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
return b.db.Update(func(tx *bolt.Tx) error {
@@ -1036,19 +1027,6 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
})
}
// PurgeTempUploads will remove all the pending uploads from the queue
// TO BE USED IN TESTING ONLY
func (b *Persistent) PurgeTempUploads() {
b.tempQueueMux.Lock()
defer b.tempQueueMux.Unlock()
_ = b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
return nil
})
}
// Close should be called when the program ends gracefully
func (b *Persistent) Close() {
b.cleanupMux.Lock()

backend/cache/utils_test.go (new file)
View File

@@ -0,0 +1,23 @@
package cache
import bolt "go.etcd.io/bbolt"
// PurgeTempUploads will remove all the pending uploads from the queue
func (b *Persistent) PurgeTempUploads() {
b.tempQueueMux.Lock()
defer b.tempQueueMux.Unlock()
_ = b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
return nil
})
}
// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
item.Started = true
return nil
})
}
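
Moving these two "TO BE USED IN TESTING ONLY" helpers into a _test.go file means they are only compiled by go test and never ship in the cache backend's normal build, while the tests keep full access to package internals. A generic illustration of that build rule, shown as two files with hypothetical names:

// queue.go - part of the normal build
package queue

type Queue struct{ items []string }

func (q *Queue) Push(s string) { q.items = append(q.items, s) }

// queue_helpers_test.go - same package, but compiled only by `go test`,
// so this reset helper never ships with the package even though it can
// still touch unexported fields
package queue

func (q *Queue) purgeForTest() { q.items = nil }
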

View File

@@ -71,30 +71,6 @@ type ReadSeekCloser interface {
// OpenRangeSeek opens the file handle at the offset with the limit given
type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)
// Cipher is used to swap out the encryption implementations
type Cipher interface {
// EncryptFileName encrypts a file path
EncryptFileName(string) string
// DecryptFileName decrypts a file path, returns error if decrypt was invalid
DecryptFileName(string) (string, error)
// EncryptDirName encrypts a directory path
EncryptDirName(string) string
// DecryptDirName decrypts a directory path, returns error if decrypt was invalid
DecryptDirName(string) (string, error)
// EncryptData
EncryptData(io.Reader) (io.Reader, error)
// DecryptData
DecryptData(io.ReadCloser) (io.ReadCloser, error)
// DecryptDataSeek decrypt at a given position
DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
// EncryptedSize calculates the size of the data when encrypted
EncryptedSize(int64) int64
// DecryptedSize calculates the size of the data when decrypted
DecryptedSize(int64) (int64, error)
// NameEncryptionMode returns the used mode for name handling
NameEncryptionMode() NameEncryptionMode
}
// NameEncryptionMode is the type of file name encryption in use
type NameEncryptionMode int
@@ -136,7 +112,8 @@ func (mode NameEncryptionMode) String() (out string) {
return out
}
type cipher struct {
// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
@@ -148,8 +125,8 @@ type cipher struct {
}
// newCipher initialises the cipher. If salt is "" then it uses a built-in salt value
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*cipher, error) {
c := &cipher{
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
c := &Cipher{
mode: mode,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
@@ -172,7 +149,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
//
// Note that an empty password makes all 0x00 keys which is used in the
// tests.
func (c *cipher) Key(password, salt string) (err error) {
func (c *Cipher) Key(password, salt string) (err error) {
const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
var saltBytes = defaultSalt
if salt != "" {
@@ -196,12 +173,12 @@ func (c *cipher) Key(password, salt string) (err error) {
}
// getBlock gets a block from the pool of size blockSize
func (c *cipher) getBlock() []byte {
func (c *Cipher) getBlock() []byte {
return c.buffers.Get().([]byte)
}
// putBlock returns a block to the pool of size blockSize
func (c *cipher) putBlock(buf []byte) {
func (c *Cipher) putBlock(buf []byte) {
if len(buf) != blockSize {
panic("bad blocksize returned to pool")
}
@@ -246,7 +223,7 @@ func decodeFileName(in string) ([]byte, error) {
// This means that
// * filenames with the same name will encrypt the same
// * filenames which start the same won't have a common prefix
func (c *cipher) encryptSegment(plaintext string) string {
func (c *Cipher) encryptSegment(plaintext string) string {
if plaintext == "" {
return ""
}
@@ -256,7 +233,7 @@ func (c *cipher) encryptSegment(plaintext string) string {
}
// decryptSegment decrypts a path segment
func (c *cipher) decryptSegment(ciphertext string) (string, error) {
func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
@@ -283,7 +260,7 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
}
// Simple obfuscation routines
func (c *cipher) obfuscateSegment(plaintext string) string {
func (c *Cipher) obfuscateSegment(plaintext string) string {
if plaintext == "" {
return ""
}
@@ -370,7 +347,7 @@ func (c *cipher) obfuscateSegment(plaintext string) string {
return result.String()
}
func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
@@ -457,7 +434,7 @@ func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
}
// encryptFileName encrypts a file path
func (c *cipher) encryptFileName(in string) string {
func (c *Cipher) encryptFileName(in string) string {
segments := strings.Split(in, "/")
for i := range segments {
// Skip directory name encryption if the user chose to
@@ -475,7 +452,7 @@ func (c *cipher) encryptFileName(in string) string {
}
// EncryptFileName encrypts a file path
func (c *cipher) EncryptFileName(in string) string {
func (c *Cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff {
return in + encryptedSuffix
}
@@ -483,7 +460,7 @@ func (c *cipher) EncryptFileName(in string) string {
}
// EncryptDirName encrypts a directory path
func (c *cipher) EncryptDirName(in string) string {
func (c *Cipher) EncryptDirName(in string) string {
if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
return in
}
@@ -491,7 +468,7 @@ func (c *cipher) EncryptDirName(in string) string {
}
// decryptFileName decrypts a file path
func (c *cipher) decryptFileName(in string) (string, error) {
func (c *Cipher) decryptFileName(in string) (string, error) {
segments := strings.Split(in, "/")
for i := range segments {
var err error
@@ -514,7 +491,7 @@ func (c *cipher) decryptFileName(in string) (string, error) {
}
// DecryptFileName decrypts a file path
func (c *cipher) DecryptFileName(in string) (string, error) {
func (c *Cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(encryptedSuffix)
if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
@@ -526,14 +503,15 @@ func (c *cipher) DecryptFileName(in string) (string, error) {
}
// DecryptDirName decrypts a directory path
func (c *cipher) DecryptDirName(in string) (string, error) {
func (c *Cipher) DecryptDirName(in string) (string, error) {
if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
return in, nil
}
return c.decryptFileName(in)
}
func (c *cipher) NameEncryptionMode() NameEncryptionMode {
// NameEncryptionMode returns the encryption mode in use for names
func (c *Cipher) NameEncryptionMode() NameEncryptionMode {
return c.mode
}
@@ -601,7 +579,7 @@ func (n *nonce) add(x uint64) {
type encrypter struct {
mu sync.Mutex
in io.Reader
c *cipher
c *Cipher
nonce nonce
buf []byte
readBuf []byte
@@ -611,7 +589,7 @@ type encrypter struct {
}
// newEncrypter creates a new file handle encrypting on the fly
func (c *cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
fh := &encrypter{
in: in,
c: c,
@@ -683,13 +661,19 @@ func (fh *encrypter) finish(err error) (int, error) {
}
// Encrypt data encrypts the data stream
func (c *cipher) EncryptData(in io.Reader) (io.Reader, error) {
func (c *Cipher) encryptData(in io.Reader) (io.Reader, *encrypter, error) {
in, wrap := accounting.UnWrap(in) // unwrap the accounting off the Reader
out, err := c.newEncrypter(in, nil)
if err != nil {
return nil, err
return nil, nil, err
}
return wrap(out), nil // and wrap the accounting back on
return wrap(out), out, nil // and wrap the accounting back on
}
// EncryptData encrypts the data stream
func (c *Cipher) EncryptData(in io.Reader) (io.Reader, error) {
out, _, err := c.encryptData(in)
return out, err
}
// decrypter decrypts an io.ReaderCloser on the fly
@@ -698,7 +682,7 @@ type decrypter struct {
rc io.ReadCloser
nonce nonce
initialNonce nonce
c *cipher
c *Cipher
buf []byte
readBuf []byte
bufIndex int
@@ -709,7 +693,7 @@ type decrypter struct {
}
// newDecrypter creates a new file handle decrypting on the fly
func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
fh := &decrypter{
rc: rc,
c: c,
@@ -737,7 +721,7 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
}
// newDecrypterSeek creates a new file handle decrypting on the fly
func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
var rc io.ReadCloser
doRangeSeek := false
setLimit := false
@@ -1012,7 +996,7 @@ func (fh *decrypter) finishAndClose(err error) error {
}
// DecryptData decrypts the data stream
func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
out, err := c.newDecrypter(rc)
if err != nil {
return nil, err
@@ -1025,7 +1009,7 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// The open function must return a ReadCloser opened to the offset supplied
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(ctx, open, offset, limit)
if err != nil {
return nil, err
@@ -1034,7 +1018,7 @@ func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset
}
// EncryptedSize calculates the size of the data when encrypted
func (c *cipher) EncryptedSize(size int64) int64 {
func (c *Cipher) EncryptedSize(size int64) int64 {
blocks, residue := size/blockDataSize, size%blockDataSize
encryptedSize := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
if residue != 0 {
@@ -1044,7 +1028,7 @@ func (c *cipher) EncryptedSize(size int64) int64 {
}
// DecryptedSize calculates the size of the data when decrypted
func (c *cipher) DecryptedSize(size int64) (int64, error) {
func (c *Cipher) DecryptedSize(size int64) (int64, error) {
size -= int64(fileHeaderSize)
if size < 0 {
return 0, ErrorEncryptedFileTooShort
@@ -1063,7 +1047,6 @@ func (c *cipher) DecryptedSize(size int64) (int64, error) {
// check interfaces
var (
_ Cipher = (*cipher)(nil)
_ io.ReadCloser = (*decrypter)(nil)
_ io.Seeker = (*decrypter)(nil)
_ fs.RangeSeeker = (*decrypter)(nil)

View File

@@ -12,6 +12,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -784,7 +785,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
in := &errorReader{io.ErrUnexpectedEOF}
in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err)
@@ -793,14 +794,6 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
assert.Equal(t, int64(32), n)
}
type errorReader struct {
err error
}
func (er errorReader) Read(p []byte) (n int, err error) {
return 0, er.err
}
type closeDetector struct {
io.Reader
closed int
@@ -838,7 +831,7 @@ func TestNewDecrypter(t *testing.T) {
assert.Equal(t, 1, cd.closed)
}
er := &errorReader{errors.New("potato")}
er := &readers.ErrorReader{Err: errors.New("potato")}
cd = newCloseDetector(er)
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
@@ -864,7 +857,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
in2 := &errorReader{io.ErrUnexpectedEOF}
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16)
in := ioutil.NopCloser(io.MultiReader(in1, in2))
@@ -1118,7 +1111,7 @@ func TestDecrypterRead(t *testing.T) {
// Test producing an error on the file on Read the underlying file
in1 := bytes.NewBuffer(file1)
in2 := &errorReader{errors.New("potato")}
in2 := &readers.ErrorReader{Err: errors.New("potato")}
in := io.MultiReader(in1, in2)
cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd)

View File

@@ -26,6 +26,7 @@ func init() {
Name: "crypt",
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
@@ -90,7 +91,7 @@ names, or for debugging purposes.`,
}
// newCipherForConfig constructs a Cipher for the given config name
func newCipherForConfig(opt *Options) (Cipher, error) {
func newCipherForConfig(opt *Options) (*Cipher, error) {
mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
if err != nil {
return nil, err
@@ -117,7 +118,7 @@ func newCipherForConfig(opt *Options) (Cipher, error) {
}
// NewCipher constructs a Cipher for the given config
func NewCipher(m configmap.Mapper) (Cipher, error) {
func NewCipher(m configmap.Mapper) (*Cipher, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -203,7 +204,7 @@ type Fs struct {
root string
opt Options
features *fs.Features // optional features
cipher Cipher
cipher *Cipher
}
// Name of the remote (as passed into NewFs)
@@ -327,7 +328,7 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
// Encrypt the data into wrappedIn
wrappedIn, err := f.cipher.EncryptData(in)
wrappedIn, encrypter, err := f.cipher.encryptData(in)
if err != nil {
return nil, err
}
@@ -351,7 +352,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
}
// Transfer the data
o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
o, err := put(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce), options...)
if err != nil {
return nil, err
}
@@ -504,11 +505,11 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
wrappedIn, err := f.cipher.EncryptData(in)
wrappedIn, encrypter, err := f.cipher.encryptData(in)
if err != nil {
return nil, err
}
o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
o, err := do(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce))
if err != nil {
return nil, err
}
@@ -561,6 +562,37 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
return f.cipher.DecryptFileName(encryptedFileName)
}
// computeHashWithNonce takes the nonce and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Object, hashType hash.Type) (hashStr string, err error) {
// Open the src for input
in, err := src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
}
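
computeHashWithNonce never buffers the ciphertext: the encrypter output is streamed straight into the hasher with io.Copy. A simplified standalone version of that hash-while-reading step, using the standard library instead of rclone's hash package:

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"strings"
)

// hashReader drains r into a hash, so arbitrarily large streams can be
// hashed without holding them in memory - the same shape as the
// io.Copy(m, out) call in computeHashWithNonce above.
func hashReader(r io.Reader) (string, error) {
	h := md5.New()
	if _, err := io.Copy(h, r); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", h.Sum(nil)), nil
}

func main() {
	sum, err := hashReader(strings.NewReader("hello"))
	fmt.Println(sum, err)
}
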
// ComputeHash takes the nonce from o, and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
@@ -572,7 +604,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce")
}
d, err := f.cipher.(*cipher).newDecrypter(in)
d, err := f.cipher.newDecrypter(in)
if err != nil {
_ = in.Close()
return "", errors.Wrap(err, "failed to open object to read nonce")
@@ -597,30 +629,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
return "", errors.Wrap(err, "failed to close nonce read")
}
// Open the src for input
in, err = src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.(*cipher).newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
return f.computeHashWithNonce(ctx, nonce, src, hashType)
}
// MergeDirs merges the contents of all the directories passed
@@ -692,6 +701,67 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
do(ctx, wrappedNotifyFunc, pollIntervalChan)
}
var commandHelp = []fs.CommandHelp{
{
Name: "encode",
Short: "Encode the given filename(s)",
Long: `This encodes the filenames given as arguments returning a list of
strings of the encoded results.
Usage Example:
rclone backend encode crypt: file1 [file2...]
rclone rc backend/command command=encode fs=crypt: file1 [file2...]
`,
},
{
Name: "decode",
Short: "Decode the given filename(s)",
Long: `This decodes the filenames given as arguments returning a list of
strings of the decoded results. It will return an error if any of the
inputs are invalid.
Usage Example:
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
`,
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "decode":
out := make([]string, 0, len(arg))
for _, encryptedFileName := range arg {
fileName, err := f.DecryptFileName(encryptedFileName)
if err != nil {
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt: %s", encryptedFileName))
}
out = append(out, fileName)
}
return out, nil
case "encode":
out := make([]string, 0, len(arg))
for _, fileName := range arg {
encryptedFileName := f.EncryptFileName(fileName)
out = append(out, encryptedFileName)
}
return out, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
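
Both command line forms are given in the help text above; from Go the same entry point is the Command method, which returns a []string for encode and decode. A short sketch, written as if it lived inside the crypt package (the CLI file name in the comment is a placeholder):

// encodeNames drives the new backend command directly, doing the same
// work as `rclone backend encode crypt: secret.txt` does from the CLI.
func encodeNames(ctx context.Context, f *Fs, names []string) ([]string, error) {
	out, err := f.Command(ctx, "encode", names, nil)
	if err != nil {
		return nil, err
	}
	return out.([]string), nil // encode and decode both return a []string
}
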
// Object describes a wrapped Object being read from the Fs
//
// This decrypts the remote name and decrypts the data
@@ -833,13 +903,15 @@ func (f *Fs) Disconnect(ctx context.Context) error {
// This encrypts the remote name and adjusts the size
type ObjectInfo struct {
fs.ObjectInfo
f *Fs
f *Fs
nonce nonce
}
func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo {
func (f *Fs) newObjectInfo(src fs.ObjectInfo, nonce nonce) *ObjectInfo {
return &ObjectInfo{
ObjectInfo: src,
f: f,
nonce: nonce,
}
}
@@ -865,6 +937,23 @@ func (o *ObjectInfo) Size() int64 {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
var srcObj fs.Object
var ok bool
// Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Otherwise likely is a operations.OverrideRemote
srcObj = do.UnWrap()
} else {
return "", nil
}
// if this is wrapping a local object then we work out the hash
if srcObj.Fs().Features().IsLocal {
// Read the data and encrypt it to calculate the hash
fs.Debugf(o, "Computing %v hash of encrypted source", hash)
return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash)
}
return "", nil
}
@@ -903,6 +992,7 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)

View File

@@ -0,0 +1,143 @@
package crypt
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"io"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs()
require.NoError(t, err)
cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
}
return localFs, cleanup
}
// Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err)
cleanup = func() {
require.NoError(t, obj.Remove(context.Background()))
}
return obj, cleanup
}
// Test the ObjectInfo
func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var (
contents = random.String(100)
path = "hash_test_object"
ctx = context.Background()
)
if wrap {
path = "_wrap"
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
// encrypt the data
inBuf := bytes.NewBufferString(contents)
var outBuf bytes.Buffer
enc, err := f.cipher.newEncrypter(inBuf, nil)
require.NoError(t, err)
nonce := enc.nonce // read the nonce at the start
_, err = io.Copy(&outBuf, enc)
require.NoError(t, err)
var oi fs.ObjectInfo = obj
if wrap {
// wrap the object in a fs.ObjectUnWrapper if required
oi = testWrapper{oi}
}
// wrap the object in a crypt for upload using the nonce we
// saved from the encryptor
src := f.newObjectInfo(oi, nonce)
// Test ObjectInfo methods
assert.Equal(t, int64(outBuf.Len()), src.Size())
assert.Equal(t, f, src.Fs())
assert.NotEqual(t, path, src.Remote())
// Test ObjectInfo.Hash
wantHash := md5.Sum(outBuf.Bytes())
gotHash, err := src.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash)
}
func testComputeHash(t *testing.T, f *Fs) {
var (
contents = random.String(100)
path = "compute_hash_test"
ctx = context.Background()
hashType = f.Fs.Hashes().GetOne()
)
if hashType == hash.None {
t.Skipf("%v: does not support hashes", f.Fs)
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
// Upload a file to localFs as a test object
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
// Upload the same data to the remote Fs also
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
// Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
require.NoError(t, err)
// Test computed hash matches remote object hash
remoteObjHash, err := remoteObj.(*Object).Object.Hash(ctx, hashType)
require.NoError(t, err)
assert.Equal(t, remoteObjHash, computedHash)
}
// InternalTest is called by fstests.Run to run extra tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("ObjectInfo", func(t *testing.T) { testObjectInfo(t, f, false) })
t.Run("ObjectInfoWrap", func(t *testing.T) { testObjectInfo(t, f, true) })
t.Run("ComputeHash", func(t *testing.T) { testComputeHash(t, f) })
}

backend/drive/drive.go (mode changed from normal to executable)
View File

@@ -29,6 +29,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -54,6 +55,7 @@ const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
driveFolderType = "application/vnd.google-apps.folder"
shortcutMimeType = "application/vnd.google-apps.shortcut"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
defaultMinSleep = fs.Duration(100 * time.Millisecond)
@@ -65,7 +67,7 @@ const (
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = 256 * fs.KibiByte
defaultChunkSize = 8 * fs.MebiByte
partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink"
partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails"
)
// Globals
@@ -157,6 +159,7 @@ func init() {
Name: "drive",
Description: "Google Drive",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
// Parse config into Options struct
@@ -397,7 +400,7 @@ will download it anyway.`,
Default: false,
Help: `Show sizes as storage quota usage, not actual size.
Show the size of a file as the the storage quota used. This is the
Show the size of a file as the storage quota used. This is the
current version plus any older versions that have been set to keep
forever.
@@ -467,6 +470,16 @@ Google don't document so it may break in the future.
See: https://github.com/rclone/rclone/issues/3857
`,
Advanced: true,
}, {
Name: "skip_shortcuts",
Help: `If set, skip shortcut files
Normally rclone dereferences shortcut files making them appear as if
they are the original file (see [the shortcuts section](#shortcuts)).
If this flag is set then rclone will ignore shortcut files completely.
`,
Advanced: true,
Default: false,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -524,6 +537,7 @@ type Options struct {
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
DisableHTTP2 bool `config:"disable_http2"`
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
SkipShortcuts bool `config:"skip_shortcuts"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -542,6 +556,8 @@ type Fs struct {
exportExtensions []string // preferred extensions to download docs
importMimeTypes []string // MIME types to convert to docs
isTeamDrive bool // true if this is a team drive
fileFields googleapi.Field // fields to fetch file info with
m configmap.Mapper
}
type baseObject struct {
@@ -551,6 +567,7 @@ type baseObject struct {
modifiedDate string // RFC3339 time it was last modified
mimeType string // The object MIME type
bytes int64 // size of the object
parents int // number of parents
}
type documentObject struct {
baseObject
@@ -616,6 +633,9 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
return false, fserrors.FatalError(err)
}
return true, err
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
fs.Errorf(f, "Received team drive file limit error: %v", err)
return false, fserrors.FatalError(err)
}
}
}
@@ -642,17 +662,21 @@ func containsString(slice []string, s string) bool {
return false
}
// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID() (string, error) {
var info *drive.File
var err error
// getFile returns drive.File for the ID passed and fields passed in
func (f *Fs) getFile(ID string, fields googleapi.Field) (info *drive.File, err error) {
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Get("root").
Fields("id").
info, err = f.svc.Files.Get(ID).
Fields(fields).
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
})
return info, err
}
// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID() (string, error) {
info, err := f.getFile("root", "id")
if err != nil {
return "", errors.Wrap(err, "couldn't find root directory ID")
}
@@ -718,7 +742,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
query = append(query, titleQuery.String())
}
if directoriesOnly {
query = append(query, fmt.Sprintf("mimeType='%s'", driveFolderType))
query = append(query, fmt.Sprintf("(mimeType='%s' or mimeType='%s')", driveFolderType, shortcutMimeType))
}
if filesOnly {
query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
@@ -742,22 +766,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
list.Spaces("appDataFolder")
}
var fields = partialFields
if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.UseSharedDate {
fields += ",sharedWithMeTime"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}
if f.opt.SizeAsQuota {
fields += ",quotaBytesUsed"
}
fields = fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", fields)
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)
OUTER:
for {
@@ -774,6 +783,24 @@ OUTER:
}
for _, item := range files.Files {
item.Name = f.opt.Enc.ToStandardName(item.Name)
if isShortcut(item) {
// ignore shortcuts if directed
if f.opt.SkipShortcuts {
continue
}
// skip file shortcuts if directory only
if directoriesOnly && item.ShortcutDetails.TargetMimeType != driveFolderType {
continue
}
// skip directory shortcuts if file only
if filesOnly && item.ShortcutDetails.TargetMimeType == driveFolderType {
continue
}
item, err = f.resolveShortcut(item)
if err != nil {
return false, errors.Wrap(err, "list")
}
}
// Check the case of items is correct since
// the `=` operator is case insensitive.
if title != "" && title != item.Name {
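
resolveShortcut itself is not part of this compare, so purely as a hypothetical sketch of the idea: written as if it lived inside the drive package, it would swap a shortcut for its target by fetching ShortcutDetails.TargetId with the same field set, reusing the getFile helper and fileFields added above, and keep the shortcut's own name so it lists under the path the user sees:

// Hypothetical sketch only - not the implementation from this change set.
func (f *Fs) resolveShortcutSketch(item *drive.File) (*drive.File, error) {
	if item.MimeType != shortcutMimeType {
		return item, nil // not a shortcut, nothing to resolve
	}
	target, err := f.getFile(item.ShortcutDetails.TargetId, f.fileFields)
	if err != nil {
		return nil, errors.Wrap(err, "failed to resolve shortcut")
	}
	target.Name = item.Name // keep the name the shortcut was listed under
	return target, nil
}
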
@@ -1056,8 +1083,10 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
pacer: newPacer(opt),
m: m,
}
f.isTeamDrive = opt.TeamDriveID != ""
f.fileFields = f.getFileFields()
f.features = (&fs.Features{
DuplicateFiles: true,
ReadMimeType: true,
@@ -1169,9 +1198,28 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
modifiedDate: modifiedDate,
mimeType: info.MimeType,
bytes: size,
parents: len(info.Parents),
}
}
// getFileFields gets the fields for a normal file Get or List
func (f *Fs) getFileFields() (fields googleapi.Field) {
fields = partialFields
if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.UseSharedDate {
fields += ",sharedWithMeTime"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}
if f.opt.SizeAsQuota {
fields += ",quotaBytesUsed"
}
return fields
}
// newRegularObject creates a fs.Object for a normal drive.File
func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
@@ -1185,7 +1233,7 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
}
return &Object{
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, info.Id),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
md5sum: strings.ToLower(info.Md5Checksum),
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
}
@@ -1197,17 +1245,18 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
if err != nil {
return nil, err
}
url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, info.Id, url.QueryEscape(mediaType))
id := actualID(info.Id)
url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, id, url.QueryEscape(mediaType))
if f.opt.AlternateExport {
switch info.MimeType {
case "application/vnd.google-apps.drawing":
url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", info.Id, extension[1:])
url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", id, extension[1:])
case "application/vnd.google-apps.document":
url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", info.Id, extension[1:])
url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", id, extension[1:])
case "application/vnd.google-apps.spreadsheet":
url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", info.Id, extension[1:])
url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", id, extension[1:])
case "application/vnd.google-apps.presentation":
url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", info.Id, extension[1:])
url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", id, extension[1:])
}
}
baseObject := f.newBaseObject(remote+extension, info)
@@ -1265,23 +1314,37 @@ func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, erro
// When the drive.File cannot be represented as a fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
remote string, info *drive.File,
extension, exportName, exportMimeType string, isDocument bool) (fs.Object, error) {
extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
// Note that resolveShortcut will already have been called if we
// are being called from a listing. In that case the drive.File
// will already have been resolved, so this will do nothing.
info, err = f.resolveShortcut(info)
if err != nil {
return nil, errors.Wrap(err, "new object")
}
switch {
case info.MimeType == driveFolderType:
return nil, fs.ErrorNotAFile
case info.MimeType == shortcutMimeType:
// We can only get here if f.opt.SkipShortcuts is set
// and not from a listing. This is unlikely.
fs.Debugf(remote, "Ignoring shortcut as skip shortcuts is set")
return nil, fs.ErrorObjectNotFound
case info.Md5Checksum != "" || info.Size > 0:
// If item has MD5 sum or a length it is a file stored on drive
return f.newRegularObject(remote, info), nil
case f.opt.SkipGdocs:
fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
return nil, nil
return nil, fs.ErrorObjectNotFound
default:
// If item MimeType is in the ExportFormats then it is a google doc
if !isDocument {
fs.Debugf(remote, "Ignoring unknown document type %q", info.MimeType)
return nil, nil
return nil, fs.ErrorObjectNotFound
}
if extension == "" {
fs.Debugf(remote, "No export formats found for %q", info.MimeType)
return nil, nil
return nil, fs.ErrorObjectNotFound
}
if isLinkMimeType(exportMimeType) {
return f.newLinkObject(remote, info, extension, exportMimeType)
@@ -1313,6 +1376,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
pathID = actualID(pathID)
found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
_, exportName, _, isDocument := f.findExportFormat(item)
@@ -1506,6 +1570,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil {
return nil, err
}
directoryID = actualID(directoryID)
var iErr error
_, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool {
@@ -1591,8 +1656,13 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
listRSlices{dirs, paths}.Sort()
var iErr error
_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
// shared with me items have no parents when at the root
if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" {
item.Parents = dirs
}
for _, parent := range item.Parents {
var i int
earlyExit := false
// If only one item in paths then no need to search for the ID
// assuming google drive is doing its job properly.
//
@@ -1602,6 +1672,9 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
// - shared with me items have no parents at the root
// - if using a root alias, eg "root" or "appDataFolder" the ID won't match
i = 0
// items at root can have more than one parent so we need to put
// the item in just once.
earlyExit = true
} else {
// only handle parents that are in the requested dirs list if not at root
i = sort.SearchStrings(dirs, parent)
@@ -1621,6 +1694,11 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
iErr = err
return true
}
// If we didn't check the parents then insert only once
if earlyExit {
break
}
}
return false
})
@@ -1671,6 +1749,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
if err != nil {
return err
}
directoryID = actualID(directoryID)
mu := sync.Mutex{} // protects in and overflow
wg := sync.WaitGroup{}
@@ -1684,11 +1763,12 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
mu.Lock()
defer mu.Unlock()
if d, isDir := entry.(*fs.Dir); isDir && in != nil {
job := listREntry{actualID(d.ID()), d.Remote()}
select {
case in <- listREntry{d.ID(), d.Remote()}:
case in <- job:
wg.Add(1)
default:
overflow = append(overflow, listREntry{d.ID(), d.Remote()})
overflow = append(overflow, job)
}
}
listed++
@@ -1765,10 +1845,87 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return nil
}
const shortcutSeparator = '\t'
// joinID adds an actual drive ID to the shortcut ID it came from
//
// directoryIDs in the dircache are these composite directory IDs so
// we must always unpack them before use.
func joinID(actual, shortcut string) string {
return actual + string(shortcutSeparator) + shortcut
}
// splitID separates an actual ID and a shortcut ID from a composite
// ID. If there was no shortcut ID then it will return "" for it.
func splitID(compositeID string) (actualID, shortcutID string) {
i := strings.IndexRune(compositeID, shortcutSeparator)
if i < 0 {
return compositeID, ""
}
return compositeID[:i], compositeID[i+1:]
}
// isShortcutID returns true if compositeID refers to a shortcut
func isShortcutID(compositeID string) bool {
return strings.IndexRune(compositeID, shortcutSeparator) >= 0
}
// actualID returns an actual ID from a composite ID
func actualID(compositeID string) (actualID string) {
actualID, _ = splitID(compositeID)
return actualID
}
// shortcutID returns a shortcut ID from a composite ID if available,
// or the actual ID if not.
func shortcutID(compositeID string) (shortcutID string) {
actualID, shortcutID := splitID(compositeID)
if shortcutID != "" {
return shortcutID
}
return actualID
}
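// Illustrative sketch (not part of this change) showing how the
// composite ID helpers above fit together - the IDs are made-up
// examples.
func exampleCompositeID() {
composite := joinID("target123", "shortcut456") // "target123\tshortcut456"
fmt.Println(isShortcutID(composite)) // true
fmt.Println(actualID(composite)) // "target123" - the file the shortcut points at
fmt.Println(shortcutID(composite)) // "shortcut456" - the shortcut file itself
fmt.Println(shortcutID("plain789")) // "plain789" - plain IDs pass through unchanged
}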
// isShortcut returns true if the item is a shortcut
func isShortcut(item *drive.File) bool {
return item.MimeType == shortcutMimeType && item.ShortcutDetails != nil
}
// Dereference shortcut if required. It returns the newItem (which may
// be just item).
//
// If we return a new item then the ID will be adjusted to be a
// composite of the actual ID and the shortcut ID. This makes sure
// that every place the ID is used has explicitly decided what to do
// with it.
//
// Note that we assume shortcuts can't point to shortcuts. Google
// drive web interface doesn't offer the option to create a shortcut
// to a shortcut. The documentation is silent on the issue.
func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error) {
if f.opt.SkipShortcuts || item.MimeType != shortcutMimeType {
return item, nil
}
if item.ShortcutDetails == nil {
fs.Errorf(nil, "Expecting shortcutDetails in %v", item)
return item, nil
}
newItem, err = f.getFile(item.ShortcutDetails.TargetId, f.fileFields)
if err != nil {
return nil, errors.Wrap(err, "failed to resolve shortcut")
}
// make sure we use the Name and Parents from the original item
newItem.Name = item.Name
newItem.Parents = item.Parents
// the new ID is a composite ID
newItem.Id = joinID(newItem.Id, item.Id)
return newItem, nil
}
// itemToDirEntry converts a drive.File to a fs.DirEntry.
// When the drive.File cannot be represented as a fs.DirEntry
// (nil, nil) is returned.
func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error) {
func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry, err error) {
switch {
case item.MimeType == driveFolderType:
// cache the directory ID for later lookups
@@ -1779,7 +1936,11 @@ func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error
case f.opt.AuthOwnerOnly && !isAuthOwned(item):
// ignore object
default:
return f.newObjectWithInfo(remote, item)
entry, err = f.newObjectWithInfo(remote, item)
if err == fs.ErrorObjectNotFound {
return nil, nil
}
return entry, err
}
return nil, nil
}
@@ -1792,6 +1953,7 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
if err != nil {
return nil, err
}
directoryID = actualID(directoryID)
leaf = f.opt.Enc.FromStandardName(leaf)
// Define the metadata for the file we are going to create.
@@ -1895,6 +2057,18 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) < 2 {
return nil
}
newDirs := dirs[:0]
for _, dir := range dirs {
if isShortcutID(dir.ID()) {
fs.Infof(dir, "skipping shortcut directory")
continue
}
newDirs = append(newDirs, dir)
}
dirs = newDirs
if len(dirs) < 2 {
return nil
}
@@ -1928,7 +2102,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.rmdir(ctx, srcDir.ID(), true)
err = f.delete(ctx, srcDir.ID(), true)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
}
@@ -1948,20 +2122,20 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return err
}
// Rmdir deletes a directory unconditionally by ID
func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error {
// delete a file or directory unconditionally by ID
func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
return f.pacer.Call(func() (bool, error) {
var err error
if useTrash {
info := drive.File{
Trashed: true,
}
_, err = f.svc.Files.Update(directoryID, &info).
_, err = f.svc.Files.Update(id, &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = f.svc.Files.Delete(directoryID).
err = f.svc.Files.Delete(id).
Fields("").
SupportsAllDrives(true).
Do()
@@ -1980,6 +2154,11 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
if err != nil {
return err
}
directoryID, shortcutID := splitID(directoryID)
// if directory is a shortcut remove it regardless
if shortcutID != "" {
return f.delete(ctx, shortcutID, f.opt.UseTrash)
}
var trashedFiles = false
found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
if !item.Trashed {
@@ -2000,7 +2179,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// trash the directory if it had trashed files
// in or the user wants to trash, otherwise
// delete it.
err = f.rmdir(ctx, directoryID, trashedFiles || f.opt.UseTrash)
err = f.delete(ctx, directoryID, trashedFiles || f.opt.UseTrash)
if err != nil {
return err
}
@@ -2029,11 +2208,13 @@ func (f *Fs) Precision() time.Duration {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
var srcObj *baseObject
ext := ""
readDescription := false
switch src := src.(type) {
case *Object:
srcObj = &src.baseObject
case *documentObject:
srcObj, ext = &src.baseObject, src.ext()
readDescription = true
case *linkObject:
srcObj, ext = &src.baseObject, src.ext()
default:
@@ -2057,9 +2238,28 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
if readDescription {
// preserve the description on copy for docs
info, err := f.getFile(actualID(srcObj.id), "description")
if err != nil {
return nil, errors.Wrap(err, "failed to read description for Google Doc")
}
// set the description if there is one, or use the default if not
if info.Description != "" {
createInfo.Description = info.Description
}
} else {
// don't overwrite the description on copy for files
// this should work for docs but it doesn't - it is probably a bug in Google Drive
createInfo.Description = ""
}
// get the ID of the thing to copy - this is the shortcut if available
id := shortcutID(srcObj.id)
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Copy(srcObj.id, createInfo).
info, err = f.svc.Files.Copy(id, createInfo).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
@@ -2098,23 +2298,7 @@ func (f *Fs) Purge(ctx context.Context) error {
if err != nil {
return err
}
err = f.pacer.Call(func() (bool, error) {
if f.opt.UseTrash {
info := drive.File{
Trashed: true,
}
_, err = f.svc.Files.Update(f.dirCache.RootID(), &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = f.svc.Files.Delete(f.dirCache.RootID()).
Fields("").
SupportsAllDrives(true).
Do()
}
return f.shouldRetry(err)
})
err = f.delete(ctx, shortcutID(f.dirCache.RootID()), f.opt.UseTrash)
f.dirCache.ResetRoot()
if err != nil {
return err
@@ -2220,6 +2404,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
srcParentID = actualID(srcParentID)
// Temporary Object under construction
dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
@@ -2232,7 +2417,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Do the move
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Update(srcObj.id, dstInfo).
info, err = f.svc.Files.Update(shortcutID(srcObj.id), dstInfo).
RemoveParents(srcParentID).
AddParents(dstParents).
Fields(partialFields).
@@ -2252,13 +2437,14 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
id, err := f.dirCache.FindDir(ctx, remote, false)
if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote)
id = shortcutID(id)
} else {
fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(ctx, remote)
if err != nil {
return "", err
}
id = o.(fs.IDer).ID()
id = shortcutID(o.(fs.IDer).ID())
}
permission := &drive.Permission{
@@ -2333,6 +2519,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if err != nil {
return err
}
dstDirectoryID = actualID(dstDirectoryID)
// Check destination does not exist
if dstRemote != "" {
@@ -2356,19 +2543,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if err != nil {
return err
}
srcDirectoryID = actualID(srcDirectoryID)
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
// Do the move
patch := drive.File{
Name: leaf,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.Update(srcID, &patch).
_, err = f.svc.Files.Update(shortcutID(srcID), &patch).
RemoveParents(srcDirectoryID).
AddParents(dstDirectoryID).
Fields("").
@@ -2545,6 +2732,258 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64)
if err != nil {
return errors.Wrap(err, "couldn't convert chunk size to int")
}
chunkSize := fs.SizeSuffix(chunkSizeInt)
if chunkSize == f.opt.ChunkSize {
return nil
}
err = checkUploadChunkSize(chunkSize)
if err == nil {
f.opt.ChunkSize = chunkSize
}
return err
}
func (f *Fs) changeServiceAccountFile(file string) (err error) {
fs.Debugf(nil, "Changing Service Account File from %s to %s", f.opt.ServiceAccountFile, file)
if file == f.opt.ServiceAccountFile {
return nil
}
oldSvc := f.svc
oldv2Svc := f.v2Svc
oldOAuthClient := f.client
oldFile := f.opt.ServiceAccountFile
oldCredentials := f.opt.ServiceAccountCredentials
defer func() {
// Undo all the changes instead of doing selective undos
if err != nil {
f.svc = oldSvc
f.v2Svc = oldv2Svc
f.client = oldOAuthClient
f.opt.ServiceAccountFile = oldFile
f.opt.ServiceAccountCredentials = oldCredentials
}
}()
f.opt.ServiceAccountFile = file
f.opt.ServiceAccountCredentials = ""
oAuthClient, err := createOAuthClient(&f.opt, f.name, f.m)
if err != nil {
return errors.Wrap(err, "drive: failed when making oauth client")
}
f.client = oAuthClient
f.svc, err = drive.New(f.client)
if err != nil {
return errors.Wrap(err, "couldn't create Drive client")
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
if err != nil {
return errors.Wrap(err, "couldn't create Drive v2 client")
}
}
return nil
}
// Create a shortcut from (f, srcPath) to (dstFs, dstPath)
//
// Will not overwrite existing files
func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPath string) (o fs.Object, err error) {
srcFs := f
srcPath = strings.Trim(srcPath, "/")
dstPath = strings.Trim(dstPath, "/")
if dstPath == "" {
return nil, errors.New("shortcut destination can't be root directory")
}
// Find source
var srcID string
isDir := false
if srcPath == "" {
// source is root directory
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
srcID = f.dirCache.RootID()
isDir = true
} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
if err != fs.ErrorNotAFile {
return nil, errors.Wrap(err, "can't find source")
}
// source was a directory
srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false)
if err != nil {
return nil, errors.Wrap(err, "failed to find source dir")
}
isDir = true
} else {
// source was a file
srcID = srcObj.(*Object).id
}
srcID = actualID(srcID) // link to underlying object not to shortcut
// Find destination
_, err = dstFs.NewObject(ctx, dstPath)
if err != fs.ErrorObjectNotFound {
if err == nil {
err = errors.New("existing file")
} else if err == fs.ErrorNotAFile {
err = errors.New("existing directory")
}
return nil, errors.Wrap(err, "not overwriting shortcut target")
}
// Create destination shortcut
createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
if err != nil {
return nil, errors.Wrap(err, "shortcut destination failed")
}
createInfo.MimeType = shortcutMimeType
createInfo.ShortcutDetails = &drive.FileShortcutDetails{
TargetId: srcID,
}
var info *drive.File
err = dstFs.pacer.CallNoRetry(func() (bool, error) {
info, err = dstFs.svc.Files.Create(createInfo).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(dstFs.opt.KeepRevisionForever).
Do()
return dstFs.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "shortcut creation failed")
}
if isDir {
return nil, nil
}
return dstFs.newObjectWithInfo(dstPath, info)
}
var commandHelp = []fs.CommandHelp{{
Name: "get",
Short: "Get command for fetching the drive config parameters",
Long: `This is a get command which will be used to fetch the various drive config parameters
Usage Examples:
rclone backend get drive: [-o service_account_file] [-o chunk_size]
rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
`,
Opts: map[string]string{
"chunk_size": "show the current upload chunk size",
"service_account_file": "show the current service account file",
},
}, {
Name: "set",
Short: "Set command for updating the drive config parameters",
Long: `This is a set command which will be used to update the various drive config parameters
Usage Examples:
rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
`,
Opts: map[string]string{
"chunk_size": "update the current upload chunk size",
"service_account_file": "update the current service account file",
},
}, {
Name: "shortcut",
Short: "Create shortcuts from files or directories",
Long: `This command creates shortcuts from files or directories.
Usage:
rclone backend shortcut drive: source_item destination_shortcut
rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
In the first example this creates a shortcut from the "source_item"
which can be a file or a directory to the "destination_shortcut". The
"source_item" and the "destination_shortcut" should be relative paths
from "drive:"
In the second example this creates a shortcut from the "source_item"
relative to "drive:" to the "destination_shortcut" relative to
"drive2:". This may fail with a permission error if the user
authenticated with "drive2:" can't read files from "drive:".
`,
Opts: map[string]string{
"target": "optional target remote for the shortcut destination",
},
}}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "get":
out := make(map[string]string)
if _, ok := opt["service_account_file"]; ok {
out["service_account_file"] = f.opt.ServiceAccountFile
}
if _, ok := opt["chunk_size"]; ok {
out["chunk_size"] = fmt.Sprintf("%s", f.opt.ChunkSize)
}
return out, nil
case "set":
out := make(map[string]map[string]string)
if serviceAccountFile, ok := opt["service_account_file"]; ok {
serviceAccountMap := make(map[string]string)
serviceAccountMap["previous"] = f.opt.ServiceAccountFile
if err = f.changeServiceAccountFile(serviceAccountFile); err != nil {
return out, err
}
f.m.Set("service_account_file", serviceAccountFile)
serviceAccountMap["current"] = f.opt.ServiceAccountFile
out["service_account_file"] = serviceAccountMap
}
if chunkSize, ok := opt["chunk_size"]; ok {
chunkSizeMap := make(map[string]string)
chunkSizeMap["previous"] = fmt.Sprintf("%s", f.opt.ChunkSize)
if err = f.changeChunkSize(chunkSize); err != nil {
return out, err
}
chunkSizeString := fmt.Sprintf("%s", f.opt.ChunkSize)
f.m.Set("chunk_size", chunkSizeString)
chunkSizeMap["current"] = chunkSizeString
out["chunk_size"] = chunkSizeMap
}
return out, nil
case "shortcut":
if len(arg) != 2 {
return nil, errors.New("need exactly 2 arguments")
}
dstFs := f
target, ok := opt["target"]
if ok {
targetFs, err := cache.Get(target)
if err != nil {
return nil, errors.Wrap(err, "couldn't find target")
}
dstFs, ok = targetFs.(*Fs)
if !ok {
return nil, errors.New("target is not a drive backend")
}
}
return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
default:
return nil, fs.ErrorCommandNotFound
}
}
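// Illustrative only (not part of this change): with example values the
// map returned by the "get" branch above JSON encodes as
// {"chunk_size":"8M","service_account_file":"/path/to/sa.json"}
// and the map returned by the "set" branch (after -o chunk_size=67108864) as
// {"chunk_size":{"current":"64M","previous":"8M"}}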
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -2605,8 +3044,9 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
}
return nil, "", "", "", false, err
}
directoryID = actualID(directoryID)
found, err := f.list(ctx, []string{directoryID}, leaf, false, true, false, func(item *drive.File) bool {
found, err := f.list(ctx, []string{directoryID}, leaf, false, false, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
if exportName == leaf {
@@ -2656,7 +3096,7 @@ func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
var info *drive.File
err := o.fs.pacer.Call(func() (bool, error) {
var err error
info, err = o.fs.svc.Files.Update(o.id, updateInfo).
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
Fields(partialFields).
SupportsAllDrives(true).
Do()
@@ -2785,7 +3225,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if o.v2Download {
var v2File *drive_v2.File
err = o.fs.pacer.Call(func() (bool, error) {
v2File, err = o.fs.v2Svc.Files.Get(o.id).
v2File, err = o.fs.v2Svc.Files.Get(actualID(o.id)).
Fields("downloadUrl").
SupportsAllDrives(true).
Do()
@@ -2864,7 +3304,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
info, err = o.fs.svc.Files.Update(o.id, updateInfo).
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
Media(in, googleapi.ContentType(uploadMimeType)).
Fields(partialFields).
SupportsAllDrives(true).
@@ -2884,6 +3324,26 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
// If o is a shortcut
if isShortcutID(o.id) {
// Delete it first
err := o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
if err != nil {
return err
}
// Then put the file as a new file
newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
if err != nil {
return err
}
// Update the object
if newO, ok := newObj.(*Object); ok {
*o = *newO
} else {
fs.Debugf(newObj, "Failed to update object %T from new object %T", o, newObj)
}
return nil
}
srcMimeType := fs.MimeType(ctx, src)
updateInfo := &drive.File{
MimeType: srcMimeType,
@@ -2954,25 +3414,10 @@ func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo
// Remove an object
func (o *baseObject) Remove(ctx context.Context) error {
var err error
err = o.fs.pacer.Call(func() (bool, error) {
if o.fs.opt.UseTrash {
info := drive.File{
Trashed: true,
}
_, err = o.fs.svc.Files.Update(o.id, &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = o.fs.svc.Files.Delete(o.id).
Fields("").
SupportsAllDrives(true).
Do()
}
return o.fs.shouldRetry(err)
})
return err
if o.parents > 1 {
return errors.New("can't delete safely - has multiple parents")
}
return o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
}
// MimeType of an Object if known, "" otherwise
@@ -3034,6 +3479,7 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)


@@ -14,6 +14,7 @@ import (
"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
@@ -268,6 +269,98 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
}
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
func (f *Fs) InternalTestShortcuts(t *testing.T) {
const (
// from fstest/fstests/fstests.go
existingDir = "hello? sausage"
existingFile = "file name.txt"
existingSubDir = "êé"
)
ctx := context.Background()
srcObj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
srcHash, err := srcObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.NotEqual(t, "", srcHash)
t.Run("Errors", func(t *testing.T) {
_, err := f.makeShortcut(ctx, "", f, "")
assert.Error(t, err)
assert.Contains(t, err.Error(), "can't be root")
_, err = f.makeShortcut(ctx, "notfound", f, "dst")
assert.Error(t, err)
assert.Contains(t, err.Error(), "can't find source")
_, err = f.makeShortcut(ctx, existingFile, f, existingFile)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not overwriting")
assert.Contains(t, err.Error(), "existing file")
_, err = f.makeShortcut(ctx, existingFile, f, existingDir)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not overwriting")
assert.Contains(t, err.Error(), "existing directory")
})
t.Run("File", func(t *testing.T) {
dstObj, err := f.makeShortcut(ctx, existingFile, f, "shortcut.txt")
require.NoError(t, err)
require.NotNil(t, dstObj)
assert.Equal(t, "shortcut.txt", dstObj.Remote())
dstHash, err := dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
})
t.Run("Dir", func(t *testing.T) {
dstObj, err := f.makeShortcut(ctx, existingDir, f, "shortcutdir")
require.NoError(t, err)
require.Nil(t, dstObj)
entries, err := f.List(ctx, "shortcutdir")
require.NoError(t, err)
require.Equal(t, 1, len(entries))
require.Equal(t, "shortcutdir/"+existingSubDir, entries[0].Remote())
require.NoError(t, f.Rmdir(ctx, "shortcutdir"))
})
t.Run("Command", func(t *testing.T) {
_, err := f.Command(ctx, "shortcut", []string{"one"}, nil)
require.Error(t, err)
require.Contains(t, err.Error(), "need exactly 2 arguments")
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
"target": "doesnotexistremote:",
})
require.Error(t, err)
require.Contains(t, err.Error(), "couldn't find target")
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
"target": ".",
})
require.Error(t, err)
require.Contains(t, err.Error(), "target is not a drive backend")
dstObjI, err := f.Command(ctx, "shortcut", []string{existingFile, "shortcut2.txt"}, map[string]string{
"target": fs.ConfigString(f),
})
require.NoError(t, err)
dstObj := dstObjI.(*Object)
assert.Equal(t, "shortcut2.txt", dstObj.Remote())
dstHash, err := dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
dstObjI, err = f.Command(ctx, "shortcut", []string{existingFile, "shortcut3.txt"}, nil)
require.NoError(t, err)
dstObj = dstObjI.(*Object)
assert.Equal(t, "shortcut3.txt", dstObj.Remote())
dstHash, err = dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
})
}
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
@@ -282,6 +375,7 @@ func (f *Fs) InternalTest(t *testing.T) {
})
})
})
t.Run("Shortcuts", f.InternalTestShortcuts)
}
var _ fstests.InternalTester = (*Fs)(nil)

backend/dropbox/dropbox.go Normal file → Executable file

@@ -225,7 +225,11 @@ func shouldRetry(err error) (bool, error) {
return false, err
}
baseErrString := errors.Cause(err).Error()
// handle any official Retry-After header from Dropbox's SDK first
// First check for Insufficient Space
if strings.Contains(baseErrString, "insufficient_space") {
return false, fserrors.FatalError(err)
}
// Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {


@@ -17,6 +17,7 @@ import (
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
403, // Forbidden (may happen when request limit is exceeded)
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
@@ -320,7 +321,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return response, err
}
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string, options ...fs.OpenOption) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)
fileName = f.opt.Enc.FromStandardName(fileName)
@@ -338,6 +339,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
NoResponse: true,
Body: in,
ContentLength: &size,
Options: options,
MultipartContentName: "file[]",
MultipartFileName: fileName,
MultipartParams: map[string][]string{


@@ -23,11 +23,12 @@ import (
)
const (
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 334 * time.Millisecond // 3 API calls per second is recommended
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 400 * time.Millisecond // api is extremely rate limited now
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
attackConstant = 0 // start with max sleep
)
func init() {
@@ -185,7 +186,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
baseClient: &http.Client{},
}
@@ -338,7 +339,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL, options...)
if err != nil {
return nil, err
}


@@ -799,10 +799,12 @@ func (f *ftpReadCloser) Close() error {
f.f.putFtpConnection(&f.c, nil)
}
// mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable:
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
err = nil
}
}


@@ -242,6 +242,9 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "COLDLINE",
Help: "Coldline storage class",
}, {
Value: "ARCHIVE",
Help: "Archive storage class",
}, {
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
@@ -555,7 +558,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote = remote[len(prefix):]
isDirectory := strings.HasSuffix(remote, "/")
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
@@ -1063,6 +1066,33 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentType: fs.MimeType(ctx, src),
Metadata: metadataFromModTime(modTime),
}
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
object.CacheControl = value
case "content-disposition":
object.ContentDisposition = value
case "content-encoding":
object.ContentEncoding = value
case "content-language":
object.ContentLanguage = value
case "content-type":
object.ContentType = value
default:
const googMetaPrefix = "x-goog-meta-"
if strings.HasPrefix(lowerKey, googMetaPrefix) {
metaKey := lowerKey[len(googMetaPrefix):]
object.Metadata[metaKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
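// For example (a sketch, not part of this change): an upload run with
// --header-upload "Cache-Control: max-age=3600" delivers an option whose
// Header() returns ("Cache-Control", "max-age=3600"), which the switch
// above stores in object.CacheControl, while an upload header of
// "X-Goog-Meta-Source: rclone" ends up in object.Metadata["source"].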
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)


@@ -134,14 +134,20 @@ rclone mount needs to know the size of files in advance of reading
them, so setting this flag when using rclone mount is recommended if
you want to read the media.`,
Advanced: true,
}, {
Name: "start_year",
Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
}
// Fs represents a remote storage server
@@ -202,6 +208,11 @@ func (f *Fs) dirTime() time.Time {
return f.startTime
}
// startYear returns the start year
func (f *Fs) startYear() int {
return f.opt.StartYear
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
@@ -224,6 +235,10 @@ func errorHandler(resp *http.Response) error {
if err != nil {
body = nil
}
// Google sends 404 messages as images so be prepared for that
if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
body = []byte("Image not found or broken")
}
var e = api.Error{
Details: api.ErrorDetails{
Code: resp.StatusCode,
@@ -943,8 +958,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Upload the media item in exchange for an UploadToken
opts := rest.Opts{
Method: "POST",
Path: "/uploads",
Method: "POST",
Path: "/uploads",
Options: options,
ExtraHeaders: map[string]string{
"X-Goog-Upload-File-Name": fileName,
"X-Goog-Upload-Protocol": "raw",
@@ -1003,7 +1019,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Add upload to internal storage
if pattern.isUpload {
o.fs.uploadedMu.Lock()
o.fs.uploaded.AddEntry(o)
o.fs.uploadedMu.Unlock()
}
return nil
}


@@ -23,6 +23,7 @@ type lister interface {
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
startYear() int
}
// dirPattern describes a single directory pattern
@@ -222,11 +223,10 @@ func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []
return nil, "", nil
}
// Return the years from 2000 to today
// FIXME make configurable?
// Return the years from startYear to today
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
currentYear := f.dirTime().Year()
for year := 2000; year <= currentYear; year++ {
for year := f.startYear(); year <= currentYear; year++ {
entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
}
return entries, nil
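// For example: with start_year set to 2015 and a current year of 2020
// this returns the directory entries 2015, 2016, 2017, 2018, 2019 and 2020.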


@@ -59,6 +59,11 @@ func (f *testLister) dirTime() time.Time {
return startTime
}
// mock startYear for testing
func (f *testLister) startYear() int {
return 2000
}
func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct {
// input


@@ -166,8 +166,7 @@ func TestNewObject(t *testing.T) {
require.NoError(t, err)
tFile := fi.ModTime()
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
// check object not found
o, err = f.NewObject(context.Background(), "not found.txt")


@@ -164,6 +164,12 @@ type CustomerInfo struct {
IOSHash string `json:"ios_hash"`
}
// TrashResponse is returned when emptying the Trash
type TrashResponse struct {
Folders int64 `json:"folders"`
Files int64 `json:"files"`
}
// XML structures returned by the old API
// Flag is a hacky type for checking if an attribute is present


@@ -140,6 +140,11 @@ func init() {
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}, {
Name: "trashed_only",
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
Default: false,
Advanced: true,
}, {
Name: "hard_delete",
Help: "Delete files permanently rather than putting them into the trash.",
@@ -174,6 +179,7 @@ type Options struct {
Device string `config:"device"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
TrashedOnly bool `config:"trashed_only"`
HardDelete bool `config:"hard_delete"`
Unlink bool `config:"unlink"`
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
@@ -519,6 +525,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: true,
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
f.features.ListR = nil
}
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
@@ -638,13 +647,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, errors.Wrap(err, "couldn't list files")
}
if result.Deleted {
if bool(result.Deleted) && !f.opt.TrashedOnly {
return nil, fs.ErrorDirNotFound
}
for i := range result.Folders {
item := &result.Folders[i]
if item.Deleted {
if !f.opt.TrashedOnly && bool(item.Deleted) {
continue
}
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
@@ -654,8 +663,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for i := range result.Files {
item := &result.Files[i]
if item.Deleted || item.State != "COMPLETED" {
continue
if f.opt.TrashedOnly {
if !item.Deleted || item.State != "COMPLETED" {
continue
}
} else {
if item.Deleted || item.State != "COMPLETED" {
continue
}
}
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
o, err := f.newObjectWithInfo(ctx, remote, item)
@@ -1049,6 +1064,22 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return usage, nil
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
opts := rest.Opts{
Method: "POST",
Path: "files/v1/purge_trash",
}
var info api.TrashResponse
_, err := f.apiSrv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return errors.Wrap(err, "couldn't empty trash")
}
return nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
@@ -1122,7 +1153,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
if err != nil {
return err
}
if info.Deleted {
if bool(info.Deleted) && !o.fs.opt.TrashedOnly {
return fs.ErrorObjectNotFound
}
return o.setMetaData(info)
@@ -1259,6 +1290,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
opts := rest.Opts{
Method: "POST",
Path: "files/v1/allocate",
Options: options,
ExtraHeaders: make(map[string]string),
}
fileDate := api.Time(src.ModTime(ctx)).APIString()
@@ -1355,6 +1387,7 @@ var (
_ fs.ListRer = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)


@@ -41,6 +41,7 @@ func init() {
Name: "local",
Description: "Local Disk",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
@@ -697,6 +698,50 @@ func (f *Fs) Hashes() hash.Set {
return hash.Supported()
}
var commandHelp = []fs.CommandHelp{
{
Name: "noop",
Short: "A null operation for testing backend commands",
Long: `This is a test command which has some options
you can try to change the output.`,
Opts: map[string]string{
"echo": "echo the input arguments",
"error": "return an error based on option value",
},
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "noop":
if txt, ok := opt["error"]; ok {
if txt == "" {
txt = "unspecified error"
}
return nil, errors.New(txt)
}
if _, ok := opt["echo"]; ok {
out := map[string]interface{}{}
out["name"] = name
out["arg"] = arg
out["opt"] = opt
return out, nil
}
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
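// Example invocation (a sketch, not part of this change):
// rclone backend noop local: arg1 arg2 -o echo=1
// returns a map which JSON encodes (ignoring whitespace) as
// {"arg":["arg1","arg2"],"name":"noop","opt":{"echo":"1"}}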
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -723,8 +768,17 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
oldtime := o.modTime
oldsize := o.size
err := o.lstat()
var changed bool
if err != nil {
return "", errors.Wrap(err, "hash: failed to stat")
if os.IsNotExist(errors.Cause(err)) {
// If file not found then we assume any accumulated
// hashes are OK - this will error on Open
changed = true
} else {
return "", errors.Wrap(err, "hash: failed to stat")
}
} else {
changed = !o.modTime.Equal(oldtime) || oldsize != o.size
}
o.fs.objectHashesMu.Lock()
@@ -732,7 +786,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
hashValue, hashFound := o.hashes[r]
o.fs.objectHashesMu.Unlock()
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil || !hashFound {
if changed || hashes == nil || !hashFound {
var in io.ReadCloser
if !o.translatedLink {
@@ -977,7 +1031,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}
// Pre-allocate the file for performance reasons
err = preAllocate(src.Size(), f)
err = file.PreAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
@@ -1064,10 +1118,16 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
return nil, err
}
// Pre-allocate the file for performance reasons
err = preAllocate(size, out)
err = file.PreAllocate(size, out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
// Set the file to be a sparse file (important on Windows)
err = file.SetSparse(out)
if err != nil {
fs.Debugf(o, "Failed to set sparse: %v", err)
}
return out, nil
}
@@ -1158,6 +1218,7 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Commander = &Fs{}
_ fs.OpenWriterAter = &Fs{}
_ fs.Object = &Object{}
)


@@ -1,10 +0,0 @@
//+build !windows,!linux
package local
import "os"
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
return nil
}

backend/onedrive/onedrive.go Normal file → Executable file

@@ -184,6 +184,28 @@ func init() {
log.Fatalf("Failed to query available drives: %v", err)
}
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opts.Path == "/me/drives" {
opts.Path = "/me/drive"
meDrive := driveResource{}
_, err := srv.CallJSON(ctx, &opts, nil, &meDrive)
if err != nil {
log.Fatalf("Failed to query available drives: %v", err)
}
found := false
for _, drive := range drives.Drives {
if drive.DriveID == meDrive.DriveID {
found = true
break
}
}
// add the me drive if not found already
if !found {
fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
drives.Drives = append(drives.Drives, meDrive)
}
}
if len(drives.Drives) == 0 {
log.Fatalf("No drives found")
} else {
@@ -226,8 +248,9 @@ func init() {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
Above this size files will be chunked - must be multiple of 320k (327,680 bytes). Note
that the chunks will be buffered into memory.`,
Above this size files will be chunked - must be multiple of 320k (327,680 bytes) and
should not exceed 250M (262,144,000 bytes) else you may encounter \"Microsoft.SharePoint.Client.InvalidClientQueryException: The request message is too big.\"
Note that the chunks will be buffered into memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
@@ -251,6 +274,16 @@ delete OneNote files or otherwise want them to show up in directory
listing, set this option.`,
Default: false,
Advanced: true,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different onedrive configs.
This can be useful if you wish to do a server side copy between two
different Onedrives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -303,11 +336,12 @@ listing, set this option.`,
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote one drive
@@ -402,6 +436,8 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", retryAfter)
}
}
case 507: // Insufficient Storage
return false, fserrors.FatalError(err)
}
}
return retry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
@@ -576,6 +612,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
@@ -988,10 +1025,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
// Check we aren't overwriting a file on the same remote
if srcObj.fs == f {
srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
}
// Create temporary object
@@ -1572,7 +1612,7 @@ func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err er
}
// uploadFragment uploads a part
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64, options ...fs.OpenOption) (info *api.Item, err error) {
// var response api.UploadFragmentResponse
var resp *http.Response
var body []byte
@@ -1585,6 +1625,7 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
ContentLength: &toSend,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", start+skip, start+chunkSize-1, totalSize),
Body: chunk,
Options: options,
}
_, _ = chunk.Seek(skip, io.SeekStart)
resp, err = o.fs.srv.Call(ctx, &opts)
@@ -1642,7 +1683,7 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
if size <= 0 {
return nil, errors.New("unknown-sized upload not supported")
}
@@ -1693,7 +1734,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
}
seg := readers.NewRepeatableReader(io.LimitReader(in, n))
fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n)
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n, options...)
if err != nil {
return nil, err
}
@@ -1706,7 +1747,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
// Update the content of a remote file within 4MB size in one single request
// This function will set modtime after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
}
@@ -1723,6 +1764,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf)) + ":/content",
ContentLength: &size,
Body: in,
Options: options,
}
} else {
opts = rest.Opts{
@@ -1730,6 +1772,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
ContentLength: &size,
Body: in,
Options: options,
}
}
@@ -1771,9 +1814,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var info *api.Item
if size > 0 {
info, err = o.uploadMultipart(ctx, in, size, modTime)
info, err = o.uploadMultipart(ctx, in, size, modTime, options...)
} else if size == 0 {
info, err = o.uploadSinglepart(ctx, in, size, modTime)
info, err = o.uploadSinglepart(ctx, in, size, modTime, options...)
} else {
return errors.New("unknown-sized upload not supported")
}


@@ -687,8 +687,9 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
Name: leaf,
}
opts := rest.Opts{
Method: "POST",
Path: "/upload/create_file.json",
Method: "POST",
Options: options,
Path: "/upload/create_file.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &createFileData, &response)
return o.fs.shouldRetry(resp, err)
@@ -970,8 +971,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
openUploadData := openUpload{SessionID: o.fs.session.SessionID, FileID: o.id, Size: size}
// fs.Debugf(nil, "PreOpen: %#v", openUploadData)
opts := rest.Opts{
Method: "POST",
Path: "/upload/open_file_upload.json",
Method: "POST",
Options: options,
Path: "/upload/open_file_upload.json",
}
resp, err := o.fs.srv.CallJSON(ctx, &opts, &openUploadData, &openResponse)
return o.fs.shouldRetry(resp, err)


@@ -41,8 +41,7 @@ const (
rcloneEncryptedClientSecret = "ej1OIF39VOQQ0PXaSdK9ztkLw3tdLNscW2157TKNQdQKkICR4uU7aFg4eFM"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
rootID = "d0" // ID of root folder is always this
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api.pcloud.com"
)
@@ -89,13 +88,19 @@ func init() {
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}, {
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "d0",
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
}
// Fs represents a remote pcloud
@@ -265,7 +270,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return err
})
// Get rootID
// Get rootFolderID
rootID := f.opt.RootFolderID
f.dirCache = dircache.New(root, rootID, f)
// Find the current root
@@ -1074,6 +1080,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentLength: &size,
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
Options: options,
}
leaf = o.fs.opt.Enc.FromStandardName(leaf)
opts.Parameters.Set("filename", leaf)


@@ -517,7 +517,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src)
return f.PutUnchecked(ctx, in, src, options...)
default:
return nil, err
}
@@ -1002,6 +1002,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
Path: "/folder/uploadinfo",
Parameters: o.fs.baseParams(),
Options: options,
MultipartParams: url.Values{
"id": {directoryID},
},


@@ -18,6 +18,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
@@ -34,7 +35,8 @@ type Fs struct {
client *putio.Client // client for making API calls to Put.io
pacer *fs.Pacer // To pace the API calls
dirCache *dircache.DirCache // Map of directory path to directory id
oAuthClient *http.Client
httpClient *http.Client // base http client
oAuthClient *http.Client // http client with oauth Authorization
}
// ------------------------------------------------------------
@@ -59,6 +61,12 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a putio 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
// defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)
@@ -68,7 +76,9 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
if err != nil {
return nil, err
}
oAuthClient, _, err := oauthutil.NewClient(name, m, putioConfig)
root = parsePath(root)
httpClient := fshttp.NewClient(fs.Config)
oAuthClient, _, err := oauthutil.NewClientWithBaseClient(name, m, putioConfig, httpClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure putio")
}
@@ -78,6 +88,7 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
client: putio.NewClient(oAuthClient),
httpClient: httpClient,
oAuthClient: oAuthClient,
}
p.features = (&fs.Features{
@@ -253,7 +264,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if err != nil {
return nil, err
}
loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx))
loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx), options)
if err != nil {
return nil, err
}
@@ -273,7 +284,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
return f.newObjectWithInfo(ctx, remote, entry)
}
func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time) (location string, err error) {
func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time, options []fs.OpenOption) (location string, err error) {
// defer log.Trace(f, "name=%v, size=%v, parentID=%v, modTime=%v", name, size, parentID, modTime.String())("location=%v, err=%v", location, &err)
err = f.pacer.Call(func() (bool, error) {
req, err := http.NewRequest("POST", "https://upload.put.io/files/", nil)
@@ -288,6 +299,7 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
b64parentID := base64.StdEncoding.EncodeToString([]byte(parentID))
b64modifiedAt := base64.StdEncoding.EncodeToString([]byte(modTime.Format(time.RFC3339)))
req.Header.Set("upload-metadata", fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at %s", b64name, b64true, b64parentID, b64modifiedAt))
fs.OpenOptionAddHTTPHeaders(req.Header, options)
resp, err := f.oAuthClient.Do(req)
retry, err := shouldRetry(err)
if retry {
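For put.io the options cannot ride on rest.Opts, so fs.OpenOptionAddHTTPHeaders copies them onto the raw request instead. A rough sketch of what that amounts to (the header name is made up):

options := []fs.OpenOption{&fs.HTTPOption{Key: "X-Example", Value: "1"}}
req, _ := http.NewRequest("POST", "https://upload.put.io/files/", nil)
fs.OpenOptionAddHTTPHeaders(req.Header, options)
// req.Header.Get("X-Example") is now "1"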


@@ -241,7 +241,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
req.Header.Set(header, value)
}
// fs.Debugf(o, "opening file: id=%d", o.file.ID)
resp, err = http.DefaultClient.Do(req)
resp, err = o.fs.httpClient.Do(req)
return shouldRetry(err)
})
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {


@@ -864,6 +864,76 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
})
}
// cleanUpBucket removes all pending multipart uploads for a given bucket
func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than 24 hours", bucket)
bucketInit, err := f.svc.Bucket(bucket, f.zone)
if err != nil {
return err
}
maxLimit := int(listLimitSize)
var marker *string
for {
req := qs.ListMultipartUploadsInput{
Limit: &maxLimit,
KeyMarker: marker,
}
var resp *qs.ListMultipartUploadsOutput
resp, err = bucketInit.ListMultipartUploads(&req)
if err != nil {
return errors.Wrap(err, "clean up bucket list multipart uploads")
}
for _, upload := range resp.Uploads {
if upload.Created != nil && upload.Key != nil && upload.UploadID != nil {
age := time.Since(*upload.Created)
if age > 24*time.Hour {
fs.Infof(f, "removing pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age)
req := qs.AbortMultipartUploadInput{
UploadID: upload.UploadID,
}
_, abortErr := bucketInit.AbortMultipartUpload(*upload.Key, &req)
if abortErr != nil {
err = errors.Wrapf(abortErr, "failed to remove multipart upload for %q", *upload.Key)
fs.Errorf(f, "%v", err)
}
} else {
fs.Debugf(f, "ignoring pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age)
}
}
}
if resp.HasMore != nil && !*resp.HasMore {
break
}
// Use NextKeyMarker if set, otherwise stop
if resp.NextKeyMarker == nil || *resp.NextKeyMarker == "" {
fs.Errorf(f, "Expecting NextKeyMarker but didn't find one")
break
} else {
marker = resp.NextKeyMarker
}
}
return err
}
// CleanUp removes all pending multipart uploads
func (f *Fs) CleanUp(ctx context.Context) (err error) {
if f.rootBucket != "" {
return f.cleanUpBucket(ctx, f.rootBucket)
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
cleanErr := f.cleanUpBucket(ctx, f.opt.Enc.FromStandardName(entry.Remote()))
if cleanErr != nil {
fs.Errorf(f, "Failed to clean up bucket: %q", cleanErr)
err = cleanErr
}
}
return err
}
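A minimal usage sketch, assuming f is a configured qingstor Fs: the new CleanUp method is reached through the optional fs.CleanUpper interface, which is how rclone's cleanup command finds it.

var remote fs.Fs = f
if do, ok := remote.(fs.CleanUpper); ok {
	if err := do.CleanUp(ctx); err != nil {
		fs.Errorf(f, "cleanup failed: %v", err)
	}
}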
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
@@ -1090,9 +1160,10 @@ func (o *Object) MimeType(ctx context.Context) string {
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
_ fs.ListRer = &Fs{}
_ fs.MimeTyper = &Object{}
_ fs.Fs = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
_ fs.ListRer = &Fs{}
_ fs.MimeTyper = &Object{}
)

View File

@@ -341,12 +341,17 @@ func (mu *multiUploader) abort() error {
}
// multiPartUpload upload a multiple object into QingStor
func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
var err error
//Initiate an multi-part upload
func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) {
// Initiate a multi-part upload
if err = mu.initiate(); err != nil {
return err
}
defer func() {
// Abort the transfer if returning an error
if err != nil {
_ = mu.abort()
}
}()
ch := make(chan chunk, mu.cfg.concurrency)
for i := 0; i < mu.cfg.concurrency; i++ {
@@ -400,9 +405,5 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
close(ch)
mu.wg.Wait()
// Complete Multipart Upload
err = mu.complete()
if mu.getErr() != nil || err != nil {
_ = mu.abort()
}
return err
return mu.complete()
}


@@ -641,7 +641,7 @@ isn't set then "acl" is used instead.`,
}, {
Name: "server_side_encryption",
Help: "The server-side encryption algorithm used when storing this object in S3.",
Provider: "AWS",
Provider: "AWS,Ceph,Minio",
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
@@ -652,10 +652,22 @@ isn't set then "acl" is used instead.`,
Value: "aws:kms",
Help: "aws:kms",
}},
}, {
Name: "sse_customer_algorithm",
Help: "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.",
Provider: "AWS,Ceph,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}, {
Value: "AES256",
Help: "AES256",
}},
}, {
Name: "sse_kms_key_id",
Help: "If using KMS ID you must provide the ARN of Key.",
Provider: "AWS",
Provider: "AWS,Ceph,Minio",
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
@@ -663,6 +675,24 @@ isn't set then "acl" is used instead.`,
Value: "arn:aws:kms:us-east-1:*",
Help: "arn:aws:kms:*",
}},
}, {
Name: "sse_customer_key",
Help: "If using SSE-C you must provide the secret encyption key used to encrypt/decrypt your data.",
Provider: "AWS,Ceph,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_customer_key_md5",
Help: "If using SSE-C you must provide the secret encryption key MD5 checksum.",
Provider: "AWS,Ceph,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "storage_class",
Help: "The storage class to use when storing new objects in S3.",
@@ -754,8 +784,13 @@ The minimum is 0 and the maximum is 5GB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, {
Name: "disable_checksum",
Help: "Don't store MD5 checksum with object metadata",
Name: "disable_checksum",
Help: `Don't store MD5 checksum with object metadata
Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Default: false,
Advanced: true,
}, {
@@ -889,6 +924,9 @@ type Options struct {
BucketACL string `config:"bucket_acl"`
ServerSideEncryption string `config:"server_side_encryption"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
SSECustomerKey string `config:"sse_customer_key"`
SSECustomerKeyMD5 string `config:"sse_customer_key_md5"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
@@ -908,19 +946,18 @@ type Options struct {
// Fs represents a remote s3 server
type Fs struct {
name string // the name of the remote
root string // root of the bucket - ignore all objects above this
opt Options // parsed options
features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
pacer *fs.Pacer // To pace the API calls
srv *http.Client // a plain http client
poolMu sync.Mutex // mutex protecting memory pools map
pools map[int64]*pool.Pool // memory pools
name string // the name of the remote
root string // root of the bucket - ignore all objects above this
opt Options // parsed options
features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
pacer *fs.Pacer // To pace the API calls
srv *http.Client // a plain http client
pool *pool.Pool // memory pool
}
// Object describes a s3 object
@@ -1039,6 +1076,12 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
def := defaults.Get()
def.Config.HTTPClient = lowTimeoutClient
// start a new AWS session
awsSession, err := session.NewSession()
if err != nil {
return nil, nil, errors.Wrap(err, "NewSession")
}
// first provider to supply a credential set "wins"
providers := []credentials.Provider{
// use static credentials if they're present (checked by provider)
@@ -1058,7 +1101,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
// Pick up IAM role in case we're on EC2
&ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.New(session.New(), &aws.Config{
Client: ec2metadata.New(awsSession, &aws.Config{
HTTPClient: lowTimeoutClient,
}),
ExpiryWindow: 3 * time.Minute,
@@ -1093,7 +1136,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
opt.ForcePathStyle = false
}
awsConfig := aws.NewConfig().
WithMaxRetries(fs.Config.LowLevelRetries).
WithMaxRetries(0). // Rely on rclone's retry logic
WithCredentials(cred).
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithS3ForcePathStyle(opt.ForcePathStyle).
@@ -1200,20 +1243,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
pc := fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep)))
// Set pacer retries to 0 because we are relying on SDK retry mechanism.
// Setting it to 1 because in context of pacer it means 1 attempt.
pc.SetRetries(1)
f := &Fs{
name: name,
opt: *opt,
c: c,
ses: ses,
pacer: pc,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
srv: fshttp.NewClient(fs.Config),
pools: make(map[int64]*pool.Pool),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
opt.UploadConcurrency*fs.Config.Transfers,
opt.MemoryPoolUseMmap,
),
}
f.setRoot(root)
@@ -1463,7 +1506,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote = remote[len(prefix):]
isDirectory := strings.HasSuffix(remote, "/")
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
@@ -1695,12 +1738,12 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
if err == nil {
fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
}
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
if awsErr, ok := err.(awserr.Error); ok {
if code := awsErr.Code(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
err = nil
}
}
return nil
return err
}, func() (bool, error) {
return f.bucketExists(ctx, bucket)
})
@@ -1904,19 +1947,16 @@ func (f *Fs) Hashes() hash.Set {
}
func (f *Fs) getMemoryPool(size int64) *pool.Pool {
f.poolMu.Lock()
defer f.poolMu.Unlock()
_, ok := f.pools[size]
if !ok {
f.pools[size] = pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(f.opt.ChunkSize),
f.opt.UploadConcurrency*fs.Config.Transfers,
f.opt.MemoryPoolUseMmap,
)
if size == int64(f.opt.ChunkSize) {
return f.pool
}
return f.pools[size]
return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(size),
f.opt.UploadConcurrency*fs.Config.Transfers,
f.opt.MemoryPoolUseMmap,
)
}
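For orientation, the buffers handed out here follow the usual Get/Put pairing, roughly as used by uploadMultipart further down (sketch of the call pattern only):

memPool := o.fs.getMemoryPool(int64(partSize)) // partSize as chosen by the uploader
buf := memPool.Get()
// ... fill buf and upload the part ...
memPool.Put(buf) // hand the buffer back for reuse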
// ------------------------------------------------------------
@@ -2083,22 +2123,35 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Bucket: &bucket,
Key: &bucketPath,
}
if o.fs.opt.SSECustomerAlgorithm != "" {
req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
}
if o.fs.opt.SSECustomerKey != "" {
req.SSECustomerKey = &o.fs.opt.SSECustomerKey
}
if o.fs.opt.SSECustomerKeyMD5 != "" {
req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
}
httpReq, resp := o.fs.c.GetObjectRequest(&req)
fs.FixRangeOption(options, o.bytes)
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
_, value := option.Header()
req.Range = &value
case *fs.HTTPOption:
key, value := option.Header()
httpReq.HTTPRequest.Header.Add(key, value)
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
var resp *s3.GetObjectOutput
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.GetObjectWithContext(ctx, &req)
httpReq.HTTPRequest = httpReq.HTTPRequest.WithContext(ctx)
err = httpReq.Send()
return o.fs.shouldRetry(err)
})
if err, ok := err.(awserr.RequestFailure); ok {
@@ -2200,9 +2253,16 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
tokens.Get()
buf := memPool.Get()
free := func() {
// return the memory and token
memPool.Put(buf)
tokens.Put()
}
// Fail fast: if an errgroup-managed function has already returned an error,
// gCtx is cancelled and there is no point in uploading the remaining parts.
if gCtx.Err() != nil {
free()
break
}
@@ -2211,10 +2271,12 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
n, err = readers.ReadFill(in, buf) // this can never return 0, nil
if err == io.EOF {
if n == 0 && partNum != 1 { // end if no data and if not first chunk
free()
break
}
finished = true
} else if err != nil {
free()
return errors.Wrap(err, "multipart upload failed to read source")
}
buf = buf[:n]
@@ -2223,6 +2285,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
fs.Debugf(o, "multipart upload starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(off), fs.SizeSuffix(size))
off += int64(n)
g.Go(func() (err error) {
defer free()
partLength := int64(len(buf))
// create checksum of buffer for integrity checking
@@ -2260,11 +2323,6 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
return false, nil
})
// return the memory and token
memPool.Put(buf[:partSize])
tokens.Put()
if err != nil {
return errors.Wrap(err, "multipart upload failed to upload part")
}
@@ -2350,6 +2408,15 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSECustomerAlgorithm != "" {
req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
}
if o.fs.opt.SSECustomerKey != "" {
req.SSECustomerKey = &o.fs.opt.SSECustomerKey
}
if o.fs.opt.SSECustomerKeyMD5 != "" {
req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
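A side note on the new SSE-C fields: the S3 API documents the customer-key-MD5 header as the base64-encoded MD5 digest of the raw key. If that convention applies here, the value for sse_customer_key_md5 could be derived roughly like this (sketch only; customerKey is a stand-in name):

sum := md5.Sum([]byte(customerKey))                 // crypto/md5
keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) // encoding/base64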
@@ -2396,6 +2463,18 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
httpReq.Header = headers
httpReq.ContentLength = size
for _, option := range options {
switch option.(type) {
case *fs.HTTPOption:
key, value := option.Header()
httpReq.Header.Add(key, value)
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := o.fs.srv.Do(httpReq)
if err != nil {


@@ -0,0 +1,153 @@
package api
// Some api objects are duplicated with only small differences;
// this is because the returned JSON objects are very inconsistent between api calls
// AuthenticationRequest contains user credentials
type AuthenticationRequest struct {
Username string `json:"username"`
Password string `json:"password"`
}
// AuthenticationResult is returned by a call to the authentication api
type AuthenticationResult struct {
Token string `json:"token"`
Errors []string `json:"non_field_errors"`
}
// AccountInfo contains simple user properties
type AccountInfo struct {
Usage int64 `json:"usage"`
Total int64 `json:"total"`
Email string `json:"email"`
Name string `json:"name"`
}
// ServerInfo contains server information
type ServerInfo struct {
Version string `json:"version"`
}
// DefaultLibrary when none specified
type DefaultLibrary struct {
ID string `json:"repo_id"`
Exists bool `json:"exists"`
}
// CreateLibraryRequest contains the information needed to create a library
type CreateLibraryRequest struct {
Name string `json:"name"`
Description string `json:"desc"`
Password string `json:"passwd"`
}
// Library properties. Please note not all properties are going to be useful for rclone
type Library struct {
Encrypted bool `json:"encrypted"`
Owner string `json:"owner"`
ID string `json:"id"`
Size int `json:"size"`
Name string `json:"name"`
Modified int64 `json:"mtime"`
}
// CreateLibrary properties. Seafile is not consistent and returns different types for different API calls
type CreateLibrary struct {
ID string `json:"repo_id"`
Name string `json:"repo_name"`
}
// FileType is either "dir" or "file"
type FileType string
// File types
var (
FileTypeDir FileType = "dir"
FileTypeFile FileType = "file"
)
// FileDetail contains file properties (for older api v2.0)
type FileDetail struct {
ID string `json:"id"`
Type FileType `json:"type"`
Name string `json:"name"`
Size int64 `json:"size"`
Parent string `json:"parent_dir"`
Modified string `json:"last_modified"`
}
// DirEntries contains a list of DirEntry
type DirEntries struct {
Entries []DirEntry `json:"dirent_list"`
}
// DirEntry contains a directory entry
type DirEntry struct {
ID string `json:"id"`
Type FileType `json:"type"`
Name string `json:"name"`
Size int64 `json:"size"`
Path string `json:"parent_dir"`
Modified int64 `json:"mtime"`
}
// Operation is move, copy or rename
type Operation string
// Operations
var (
CopyFileOperation Operation = "copy"
MoveFileOperation Operation = "move"
RenameFileOperation Operation = "rename"
)
// FileOperationRequest is sent to the api to copy, move or rename a file
type FileOperationRequest struct {
Operation Operation `json:"operation"`
DestinationLibraryID string `json:"dst_repo"` // For copy/move operation
DestinationPath string `json:"dst_dir"` // For copy/move operation
NewName string `json:"newname"` // Only to be used by the rename operation
}
// FileInfo is returned by a server file copy/move/rename (new api v2.1)
type FileInfo struct {
Type string `json:"type"`
LibraryID string `json:"repo_id"`
Path string `json:"parent_dir"`
Name string `json:"obj_name"`
ID string `json:"obj_id"`
Size int64 `json:"size"`
}
// CreateDirRequest only contain an operation field
type CreateDirRequest struct {
Operation string `json:"operation"`
}
// DirectoryDetail contains the directory details specific to the getDirectoryDetails call
type DirectoryDetail struct {
ID string `json:"repo_id"`
Name string `json:"name"`
Path string `json:"path"`
}
// ShareLinkRequest contains the information needed to create or list shared links
type ShareLinkRequest struct {
LibraryID string `json:"repo_id"`
Path string `json:"path"`
}
// SharedLink contains the information returned by a call to shared link creation
type SharedLink struct {
Link string `json:"link"`
IsExpired bool `json:"is_expired"`
}
// BatchSourceDestRequest contains JSON parameters for sending a batch copy or move operation
type BatchSourceDestRequest struct {
SrcLibraryID string `json:"src_repo_id"`
SrcParentDir string `json:"src_parent_dir"`
SrcItems []string `json:"src_dirents"`
DstLibraryID string `json:"dst_repo_id"`
DstParentDir string `json:"dst_parent_dir"`
}
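These structs are plain JSON bindings; from a caller's point of view they are decoded with encoding/json in the usual way. A small sketch with a made-up token value:

var result api.AuthenticationResult
err := json.Unmarshal([]byte(`{"token":"abc123","non_field_errors":[]}`), &result)
// on success, result.Token == "abc123"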

backend/seafile/object.go (new file)

@@ -0,0 +1,127 @@
package seafile
import (
"context"
"io"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
)
// Object describes a seafile object (also commonly called a file)
type Object struct {
fs *Fs // what this object is part of
id string // internal ID of object
remote string // The remote path (full path containing library name if target at root)
pathInLibrary string // Path of the object without the library name
size int64 // size of the object
modTime time.Time // modification time of the object
libraryID string // Needed to download the file
}
// ==================== Interface fs.DirEntry ====================
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote string
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns last modified time
func (o *Object) ModTime(context.Context) time.Time {
return o.modTime
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// ==================== Interface fs.ObjectInfo ====================
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// ==================== Interface fs.Object ====================
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
return fs.ErrorCantSetModTime
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
downloadLink, err := o.fs.getDownloadLink(ctx, o.libraryID, o.pathInLibrary)
if err != nil {
return nil, err
}
reader, err := o.fs.download(ctx, downloadLink, o.Size(), options...)
if err != nil {
return nil, err
}
return reader, nil
}
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
// The upload sometimes returns a temporary 500 error
// We cannot use the pacer to retry uploading the file as the upload link is single use only
for retry := 0; retry <= 3; retry++ {
uploadLink, err := o.fs.getUploadLink(ctx, o.libraryID)
if err != nil {
return err
}
uploaded, err := o.fs.upload(ctx, in, uploadLink, o.pathInLibrary)
if err == ErrorInternalDuringUpload {
// This is a temporary error, try again with a new upload link
continue
}
if err != nil {
return err
}
// Set the properties from the upload back to the object
o.size = uploaded.Size
o.id = uploaded.ID
return nil
}
return ErrorInternalDuringUpload
}
// Remove this object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.deleteFile(ctx, o.libraryID, o.pathInLibrary)
}
// ==================== Optional Interface fs.IDer ====================
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.id
}

backend/seafile/pacer.go (new file)

@@ -0,0 +1,67 @@
package seafile
import (
"fmt"
"net/url"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/pacer"
)
const (
minSleep = 100 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// Use only one pacer per server URL
var (
pacers map[string]*fs.Pacer
pacerMutex sync.Mutex
)
func init() {
pacers = make(map[string]*fs.Pacer, 0)
}
// getPacer returns the unique pacer for that remote URL
func getPacer(remote string) *fs.Pacer {
pacerMutex.Lock()
defer pacerMutex.Unlock()
remote = parseRemote(remote)
if existing, found := pacers[remote]; found {
return existing
}
pacers[remote] = fs.NewPacer(
pacer.NewDefault(
pacer.MinSleep(minSleep),
pacer.MaxSleep(maxSleep),
pacer.DecayConstant(decayConstant),
),
)
return pacers[remote]
}
// parseRemote formats a remote url into "hostname:port"
func parseRemote(remote string) string {
remoteURL, err := url.Parse(remote)
if err != nil {
// Return a default value in the very unlikely event that we cannot parse the remote URL
fs.Infof(nil, "Cannot parse remote %s", remote)
return "default"
}
host := remoteURL.Hostname()
port := remoteURL.Port()
if port == "" {
if remoteURL.Scheme == "https" {
port = "443"
} else {
port = "80"
}
}
return fmt.Sprintf("%s:%s", host, port)
}
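A usage sketch, not part of the file: callers obtain the shared pacer for a server and wrap each API call in it; client.Do and shouldRetry are stand-ins for whatever the backend really uses.

p := getPacer("https://seafile.example.com") // stored under the key "seafile.example.com:443"
err := p.Call(func() (bool, error) {
	resp, err := client.Do(req)   // stand-in HTTP call
	return shouldRetry(resp, err) // stand-in returning (retry bool, err error)
})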

backend/seafile/seafile.go (new file; diff suppressed because it is too large)


@@ -0,0 +1,123 @@
package seafile
import (
"path"
"testing"
"github.com/stretchr/testify/assert"
)
type pathData struct {
configLibrary string // Library specified in the config
configRoot string // Root directory specified in the config
argumentPath string // Path given as an argument in the command line
expectedLibrary string
expectedPath string
}
// Test the method to split a library name and a path
// from a mix of configuration data and path command line argument
func TestSplitPath(t *testing.T) {
testData := []pathData{
pathData{
configLibrary: "",
configRoot: "",
argumentPath: "",
expectedLibrary: "",
expectedPath: "",
},
pathData{
configLibrary: "",
configRoot: "",
argumentPath: "Library",
expectedLibrary: "Library",
expectedPath: "",
},
pathData{
configLibrary: "",
configRoot: "",
argumentPath: path.Join("Library", "path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("path", "to", "file"),
},
pathData{
configLibrary: "Library",
configRoot: "",
argumentPath: "",
expectedLibrary: "Library",
expectedPath: "",
},
pathData{
configLibrary: "Library",
configRoot: "",
argumentPath: "path",
expectedLibrary: "Library",
expectedPath: "path",
},
pathData{
configLibrary: "Library",
configRoot: "",
argumentPath: path.Join("path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("path", "to", "file"),
},
pathData{
configLibrary: "Library",
configRoot: "root",
argumentPath: "",
expectedLibrary: "Library",
expectedPath: "root",
},
pathData{
configLibrary: "Library",
configRoot: path.Join("root", "path"),
argumentPath: "",
expectedLibrary: "Library",
expectedPath: path.Join("root", "path"),
},
pathData{
configLibrary: "Library",
configRoot: "root",
argumentPath: "path",
expectedLibrary: "Library",
expectedPath: path.Join("root", "path"),
},
pathData{
configLibrary: "Library",
configRoot: "root",
argumentPath: path.Join("path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("root", "path", "to", "file"),
},
pathData{
configLibrary: "Library",
configRoot: path.Join("root", "path"),
argumentPath: path.Join("subpath", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("root", "path", "subpath", "to", "file"),
},
}
for _, test := range testData {
fs := &Fs{
libraryName: test.configLibrary,
rootDirectory: test.configRoot,
}
libraryName, path := fs.splitPath(test.argumentPath)
assert.Equal(t, test.expectedLibrary, libraryName)
assert.Equal(t, test.expectedPath, path)
}
}
func TestSplitPathIntoSlice(t *testing.T) {
testData := map[string][]string{
"1": {"1"},
"/1": {"1"},
"/1/": {"1"},
"1/2/3": {"1", "2", "3"},
}
for input, expected := range testData {
output := splitPath(input)
assert.Equal(t, expected, output)
}
}


@@ -0,0 +1,17 @@
// Test Seafile filesystem interface
package seafile_test
import (
"testing"
"github.com/rclone/rclone/backend/seafile"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSeafile:",
NilObject: (*seafile.Object)(nil),
})
}

backend/seafile/webapi.go (new file; diff suppressed because it is too large)


@@ -1102,19 +1102,18 @@ func (o *Object) stat() error {
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if !o.fs.opt.SetModTime {
return nil
if o.fs.opt.SetModTime {
c, err := o.fs.getSftpConnection()
if err != nil {
return errors.Wrap(err, "SetModTime")
}
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
o.fs.putSftpConnection(&c, err)
if err != nil {
return errors.Wrap(err, "SetModTime failed")
}
}
c, err := o.fs.getSftpConnection()
if err != nil {
return errors.Wrap(err, "SetModTime")
}
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
o.fs.putSftpConnection(&c, err)
if err != nil {
return errors.Wrap(err, "SetModTime failed")
}
err = o.stat()
err := o.stat()
if err != nil {
return errors.Wrap(err, "SetModTime stat failed")
}


@@ -1429,8 +1429,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var resp *http.Response
var info api.UploadSpecification
opts := rest.Opts{
Method: "POST",
Path: "/Items(" + directoryID + ")/Upload2",
Method: "POST",
Path: "/Items(" + directoryID + ")/Upload2",
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &req, &info)


@@ -733,7 +733,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src)
return f.PutUnchecked(ctx, in, src, options...)
default:
return nil, err
}
@@ -1320,6 +1320,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
RootURL: o.id,
Path: "/data",
NoResponse: true,
Options: options,
Body: in,
}
if size >= 0 {


@@ -1285,6 +1285,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
m.SetModTime(modTime)
contentType := fs.MimeType(ctx, src)
headers := m.ObjectHeaders()
fs.OpenOptionAddHeaders(options, headers)
uniquePrefix := ""
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
uniquePrefix, err = o.updateChunks(in, headers, size, contentType)

backend/tardigrade/fs.go (new file)

@@ -0,0 +1,684 @@
// +build go1.13,!plan9
// Package tardigrade provides an interface to Tardigrade decentralized object storage.
package tardigrade
import (
"context"
"fmt"
"io"
"log"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/bucket"
"golang.org/x/text/unicode/norm"
"storj.io/uplink"
)
const (
existingProvider = "existing"
newProvider = "new"
)
var satMap = map[string]string{
"us-central-1.tardigrade.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
"europe-west-1.tardigrade.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
"asia-east-1.tardigrade.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "tardigrade",
Description: "Tardigrade Decentralized Cloud Storage",
NewFs: NewFs,
Config: func(name string, configMapper configmap.Mapper) {
provider, _ := configMapper.Get(fs.ConfigProvider)
config.FileDeleteKey(name, fs.ConfigProvider)
if provider == newProvider {
satelliteString, _ := configMapper.Get("satellite_address")
apiKey, _ := configMapper.Get("api_key")
passphrase, _ := configMapper.Get("passphrase")
// satelliteString always contains a default value and passphrase may be empty
if apiKey == "" {
return
}
satellite, found := satMap[satelliteString]
if !found {
satellite = satelliteString
}
access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
if err != nil {
log.Fatalf("Couldn't create access grant: %v", err)
}
serializedAccess, err := access.Serialize()
if err != nil {
log.Fatalf("Couldn't serialize access grant: %v", err)
}
configMapper.Set("satellite_address", satellite)
configMapper.Set("access_grant", serialziedAccess)
} else if provider == existingProvider {
config.FileDeleteKey(name, "satellite_address")
config.FileDeleteKey(name, "api_key")
config.FileDeleteKey(name, "passphrase")
} else {
log.Fatalf("Invalid provider type: %s", provider)
}
},
Options: []fs.Option{
{
Name: fs.ConfigProvider,
Help: "Choose an authentication method.",
Required: true,
Default: existingProvider,
Examples: []fs.OptionExample{{
Value: "existing",
Help: "Use an existing access grant.",
}, {
Value: newProvider,
Help: "Create a new access grant from satellite address, API key, and passphrase.",
},
}},
{
Name: "access_grant",
Help: "Access Grant.",
Required: false,
Provider: "existing",
},
{
Name: "satellite_address",
Help: "Satellite Address. Custom satellite address should match the format: <nodeid>@<address>:<port>.",
Required: false,
Provider: newProvider,
Default: "us-central-1.tardigrade.io",
Examples: []fs.OptionExample{{
Value: "us-central-1.tardigrade.io",
Help: "US Central 1",
}, {
Value: "europe-west-1.tardigrade.io",
Help: "Europe West 1",
}, {
Value: "asia-east-1.tardigrade.io",
Help: "Asia East 1",
},
},
},
{
Name: "api_key",
Help: "API Key.",
Required: false,
Provider: newProvider,
},
{
Name: "passphrase",
Help: "Encryption Passphrase. To access existing objects enter passphrase used for uploading.",
Required: false,
Provider: newProvider,
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
Access string `config:"access_grant"`
SatelliteAddress string `config:"satellite_address"`
APIKey string `config:"api_key"`
Passphrase string `config:"passphrase"`
}
// Fs represents a remote to Tardigrade
type Fs struct {
name string // the name of the remote
root string // root of the filesystem
opts Options // parsed options
features *fs.Features // optional features
access *uplink.Access // parsed scope
project *uplink.Project // project client
}
// Check the interfaces are satisfied.
var (
_ fs.Fs = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PutStreamer = &Fs{}
)
// NewFs creates a filesystem backed by Tardigrade.
func NewFs(name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
ctx := context.Background()
// Setup filesystem and connection to Tardigrade
root = norm.NFC.String(root)
root = strings.Trim(root, "/")
f := &Fs{
name: name,
root: root,
}
// Parse config into Options struct
err = configstruct.Set(m, &f.opts)
if err != nil {
return nil, err
}
// Parse access
var access *uplink.Access
if f.opts.Access != "" {
access, err = uplink.ParseAccess(f.opts.Access)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}
}
if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}
serializedAccess, err := access.Serialize()
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}
err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}
}
if access == nil {
return nil, errors.New("access not found")
}
f.access = access
f.features = (&fs.Features{
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)
project, err := f.connect(ctx)
if err != nil {
return nil, err
}
f.project = project
// Root validation needs to check the following: If a bucket path is
// specified and exists, then the object must be a directory.
//
// NOTE: At this point this must return the filesystem object we've
// created so far even if there is an error.
if root != "" {
bucketName, bucketPath := bucket.Split(root)
if bucketName != "" && bucketPath != "" {
_, err = project.StatBucket(ctx, bucketName)
if err != nil {
return f, errors.Wrap(err, "tardigrade: bucket")
}
object, err := project.StatObject(ctx, bucketName, bucketPath)
if err == nil {
if !object.IsPrefix {
// If the root is actually a file we
// need to return the *parent*
// directory of the root instead and an
// error that the original root
// requested is a file.
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.root = newRoot
return f, fs.ErrorIsFile
}
}
}
}
return f, nil
}
// connect opens a connection to Tardigrade.
func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
fs.Debugf(f, "connecting...")
defer fs.Debugf(f, "connected: %+v", err)
cfg := uplink.Config{}
project, err = cfg.OpenProject(ctx, f.access)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: project")
}
return
}
// absolute computes the absolute bucket name and path from the filesystem root
// and the relative path provided.
func (f *Fs) absolute(relative string) (bucketName, bucketPath string) {
bn, bp := bucket.Split(path.Join(f.root, relative))
// NOTE: Technically libuplink does not care about the encoding. It is
// happy to work with them as opaque byte sequences. However, rclone
// has a test that requires two paths with the same normalized form
// (but different un-normalized forms) to point to the same file. This
// means we have to normalize before we interact with libuplink.
return norm.NFC.String(bn), norm.NFC.String(bp)
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("FS sj://%s", f.root)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Hashes returns the supported hash types of the filesystem.
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet()
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// List the objects and directories in relative into entries. The entries can
// be returned in any order but should be for a complete directory.
//
// relative should be "" to list the root, and should not have trailing
// slashes.
//
// This should return fs.ErrDirNotFound if the directory isn't found.
func (f *Fs) List(ctx context.Context, relative string) (entries fs.DirEntries, err error) {
fs.Debugf(f, "ls ./%s", relative)
bucketName, bucketPath := f.absolute(relative)
defer func() {
if errors.Is(err, uplink.ErrBucketNotFound) {
err = fs.ErrorDirNotFound
}
}()
if bucketName == "" {
if bucketPath != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return f.listObjects(ctx, relative, bucketName, bucketPath)
}
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
fs.Debugf(f, "BKT ls")
buckets := f.project.ListBuckets(ctx, nil)
for buckets.Next() {
bucket := buckets.Item()
entries = append(entries, fs.NewDir(bucket.Name, bucket.Created))
}
return entries, buckets.Err()
}
// newDirEntry creates a directory entry from an uplink object.
//
// NOTE: Getting the exact behavior required by rclone is somewhat tricky. The
// path manipulation here is necessary to cover all the different ways the
// filesystem and object could be initialized and combined.
func (f *Fs) newDirEntry(relative, prefix string, object *uplink.Object) fs.DirEntry {
if object.IsPrefix {
// . The entry must include the relative path as its prefix. Depending on
// | what is being listed and how the filesystem root was initialized the
// | relative path may be empty (and so we use path joining here to ensure
// | we don't end up with an empty path segment).
// |
// | . Remove the prefix used during listing.
// | |
// | | . Remove the trailing slash.
// | | |
// v v v
return fs.NewDir(path.Join(relative, object.Key[len(prefix):len(object.Key)-1]), object.System.Created)
}
return newObjectFromUplink(f, relative, object)
}
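A concrete reading of the slicing above, with illustrative values:

// relative = "photos", prefix = "photos/", object.Key = "photos/2020/"
// object.Key[len(prefix):len(object.Key)-1] == "2020"
// path.Join(relative, "2020")               == "photos/2020"  -> fs.NewDir("photos/2020", ...)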
func (f *Fs) listObjects(ctx context.Context, relative, bucketName, bucketPath string) (entries fs.DirEntries, err error) {
fs.Debugf(f, "OBJ ls ./%s (%q, %q)", relative, bucketName, bucketPath)
opts := &uplink.ListObjectsOptions{
Prefix: newPrefix(bucketPath),
System: true,
Custom: true,
}
fs.Debugf(f, "opts %+v", opts)
objects := f.project.ListObjects(ctx, bucketName, opts)
for objects.Next() {
entries = append(entries, f.newDirEntry(relative, opts.Prefix, objects.Item()))
}
err = objects.Err()
if err != nil {
return nil, err
}
return entries, nil
}
// ListR lists the objects and directories of the Fs starting from dir
// recursively into out.
//
// relative should be "" to start from the root, and should not have trailing
// slashes.
//
// This should return ErrDirNotFound if the directory isn't found.
//
// It should call callback for each tranche of entries read. These need not be
// returned in any particular order. If callback returns an error then the
// listing will stop immediately.
//
// Don't implement this unless you have a more efficient way of listing
// recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, relative string, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "ls -R ./%s", relative)
bucketName, bucketPath := f.absolute(relative)
defer func() {
if errors.Is(err, uplink.ErrBucketNotFound) {
err = fs.ErrorDirNotFound
}
}()
if bucketName == "" {
if bucketPath != "" {
return fs.ErrorListBucketRequired
}
return f.listBucketsR(ctx, callback)
}
return f.listObjectsR(ctx, relative, bucketName, bucketPath, callback)
}
func (f *Fs) listBucketsR(ctx context.Context, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "BKT ls -R")
buckets := f.project.ListBuckets(ctx, nil)
for buckets.Next() {
bucket := buckets.Item()
err = f.listObjectsR(ctx, bucket.Name, bucket.Name, "", callback)
if err != nil {
return err
}
}
return buckets.Err()
}
func (f *Fs) listObjectsR(ctx context.Context, relative, bucketName, bucketPath string, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "OBJ ls -R ./%s (%q, %q)", relative, bucketName, bucketPath)
opts := &uplink.ListObjectsOptions{
Prefix: newPrefix(bucketPath),
Recursive: true,
System: true,
Custom: true,
}
objects := f.project.ListObjects(ctx, bucketName, opts)
for objects.Next() {
object := objects.Item()
err = callback(fs.DirEntries{f.newDirEntry(relative, opts.Prefix, object)})
if err != nil {
return err
}
}
err = objects.Err()
if err != nil {
return err
}
return nil
}
// NewObject finds the Object at relative. If it can't be found it returns the
// error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, relative string) (_ fs.Object, err error) {
fs.Debugf(f, "stat ./%s", relative)
bucketName, bucketPath := f.absolute(relative)
object, err := f.project.StatObject(ctx, bucketName, bucketPath)
if err != nil {
fs.Debugf(f, "err: %+v", err)
if errors.Is(err, uplink.ErrObjectNotFound) {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
return newObjectFromUplink(f, relative, object), nil
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should
// either return an error or upload it properly (rather than e.g. calling
// panic).
//
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
fs.Debugf(f, "cp input ./%s # %+v %d", src.Remote(), options, src.Size())
// Reject options we don't support.
for _, option := range options {
if option.Mandatory() {
fs.Errorf(f, "Unsupported mandatory option: %v", option)
return nil, errors.New("unsupported mandatory option")
}
}
bucketName, bucketPath := f.absolute(src.Remote())
upload, err := f.project.UploadObject(ctx, bucketName, bucketPath, nil)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
aerr := upload.Abort()
if aerr != nil {
fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
}
}
}()
err = upload.SetCustomMetadata(ctx, uplink.CustomMetadata{
"rclone:mtime": src.ModTime(ctx).Format(time.RFC3339Nano),
})
if err != nil {
return nil, err
}
_, err = io.Copy(upload, in)
if err != nil {
err = fserrors.RetryError(err)
fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)
return nil, err
}
err = upload.Commit()
if err != nil {
if errors.Is(err, uplink.ErrBucketNotFound) {
// Rclone assumes the backend will create the bucket if it doesn't exist yet.
// Here we create the bucket and return a retry error for rclone to retry the upload.
_, err = f.project.EnsureBucket(ctx, bucketName)
if err != nil {
return nil, err
}
err = fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
}
return nil, err
}
return newObjectFromUplink(f, "", upload.Info()), nil
}
// PutStream uploads to the remote path with the modTime given of indeterminate
// size.
//
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, relative string) (err error) {
fs.Debugf(f, "mkdir -p ./%s", relative)
bucketName, _ := f.absolute(relative)
_, err = f.project.EnsureBucket(ctx, bucketName)
return err
}
// Rmdir removes the directory (container, bucket)
//
// NOTE: Despite code documentation to the contrary, this method should not
// return an error if the directory does not exist.
func (f *Fs) Rmdir(ctx context.Context, relative string) (err error) {
fs.Debugf(f, "rmdir ./%s", relative)
bucketName, bucketPath := f.absolute(relative)
if bucketPath != "" {
// If we can successfully stat it, then it is an object (and not a prefix).
_, err := f.project.StatObject(ctx, bucketName, bucketPath)
if err != nil {
if errors.Is(err, uplink.ErrObjectNotFound) {
// At this point we know it is not an object,
// but we don't know if it is a prefix for one.
//
// We check this by doing a listing and if we
// get any results back, then we know this is a
// valid prefix (which implies the directory is
// not empty).
opts := &uplink.ListObjectsOptions{
Prefix: newPrefix(bucketPath),
System: true,
Custom: true,
}
objects := f.project.ListObjects(ctx, bucketName, opts)
if objects.Next() {
return fs.ErrorDirectoryNotEmpty
}
return objects.Err()
}
return err
}
return fs.ErrorIsFile
}
_, err = f.project.DeleteBucket(ctx, bucketName)
if err != nil {
if errors.Is(err, uplink.ErrBucketNotFound) {
return fs.ErrorDirNotFound
}
if errors.Is(err, uplink.ErrBucketNotEmpty) {
return fs.ErrorDirectoryNotEmpty
}
return err
}
return nil
}
// newPrefix returns a new prefix for listing conforming to the libuplink
// requirements. In particular, libuplink requires a trailing slash for
// listings, but rclone does not always provide one. Further, depending on how
// the path was initially constructed, normalization may have removed it (e.g. a
// trailing slash from the CLI is removed before it ever gets to the backend
// code).
func newPrefix(prefix string) string {
if prefix == "" {
return prefix
}
if prefix[len(prefix)-1] == '/' {
return prefix
}
return prefix + "/"
}
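Spelled out, the behaviour is:

// newPrefix("")        == ""
// newPrefix("photos")  == "photos/"
// newPrefix("photos/") == "photos/"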


@@ -0,0 +1,204 @@
// +build go1.13,!plan9
package tardigrade
import (
"context"
"io"
"path"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/bucket"
"golang.org/x/text/unicode/norm"
"storj.io/uplink"
)
// Object describes a Tardigrade object
type Object struct {
fs *Fs
absolute string
size int64
created time.Time
modified time.Time
}
// Check the interfaces are satisfied.
var _ fs.Object = &Object{}
// newObjectFromUplink creates a new object from a Tardigrade uplink object.
func newObjectFromUplink(f *Fs, relative string, object *uplink.Object) *Object {
// Attempt to use the modified time from the metadata. Otherwise
// fallback to the server time.
modified := object.System.Created
if modifiedStr, ok := object.Custom["rclone:mtime"]; ok {
var err error
modified, err = time.Parse(time.RFC3339Nano, modifiedStr)
if err != nil {
modified = object.System.Created
}
}
bucketName, _ := bucket.Split(path.Join(f.root, relative))
return &Object{
fs: f,
absolute: norm.NFC.String(bucketName + "/" + object.Key),
size: object.System.ContentLength,
created: object.System.Created,
modified: modified,
}
}
// String returns a description of the Object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
// It is possible that we have an empty root (meaning the filesystem is
// rooted at the project level). In this case the relative path is just
// the full absolute path to the object (including the bucket name).
if o.fs.root == "" {
return o.absolute
}
// At this point we know that the filesystem itself is at least a
// bucket name (and possibly a prefix path).
//
// . This is necessary to remove the slash.
// |
// v
return o.absolute[len(o.fs.root)+1:]
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modified
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.size
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ty hash.Type) (_ string, err error) {
fs.Debugf(o, "%s", ty)
return "", hash.ErrUnsupported
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
fs.Debugf(o, "touch -d %q sj://%s", t, o.absolute)
return fs.ErrorCantSetModTime
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (_ io.ReadCloser, err error) {
fs.Debugf(o, "cat sj://%s # %+v", o.absolute, options)
bucketName, bucketPath := bucket.Split(o.absolute)
// Convert the semantics of HTTP range headers to an offset and length
// that libuplink can use.
var (
offset int64 = 0
length int64 = -1
)
for _, option := range options {
switch opt := option.(type) {
case *fs.RangeOption:
s := opt.Start >= 0
e := opt.End >= 0
switch {
case s && e:
offset = opt.Start
length = (opt.End + 1) - opt.Start
case s && !e:
offset = opt.Start
case !s && e:
object, err := o.fs.project.StatObject(ctx, bucketName, bucketPath)
if err != nil {
return nil, err
}
offset = object.System.ContentLength - opt.End
length = opt.End
}
case *fs.SeekOption:
offset = opt.Offset
default:
if option.Mandatory() {
fs.Errorf(o, "Unsupported mandatory option: %v", option)
return nil, errors.New("unsupported mandatory option")
}
}
}
fs.Debugf(o, "range %d + %d", offset, length)
return o.fs.project.DownloadObject(ctx, bucketName, bucketPath, &uplink.DownloadOptions{
Offset: offset,
Length: length,
})
}
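Worked examples of the conversion above, following the switch cases:

// fs.RangeOption{Start: 100, End: 199} -> offset 100, length 100
// fs.RangeOption{Start: 100, End: -1}  -> offset 100, length -1 (read to the end)
// fs.RangeOption{Start: -1, End: 500}  -> offset size-500, length 500 (the last 500 bytes)
// fs.SeekOption{Offset: 42}            -> offset 42, length -1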
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
fs.Debugf(o, "cp input ./%s %+v", src.Remote(), options)
oNew, err := o.fs.Put(ctx, in, src, options...)
if err == nil {
*o = *(oNew.(*Object))
}
return err
}
// Remove this object.
func (o *Object) Remove(ctx context.Context) (err error) {
fs.Debugf(o, "rm sj://%s", o.absolute)
bucketName, bucketPath := bucket.Split(o.absolute)
_, err = o.fs.project.DeleteObject(ctx, bucketName, bucketPath)
return err
}


@@ -0,0 +1,19 @@
// +build go1.13,!plan9
// Test Tardigrade filesystem interface
package tardigrade_test
import (
"testing"
"github.com/rclone/rclone/backend/tardigrade"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestTardigrade:",
NilObject: (*tardigrade.Object)(nil),
})
}


@@ -0,0 +1,3 @@
// +build !go1.13 plan9
package tardigrade

backend/union/entry.go (new file)

@@ -0,0 +1,167 @@
package union
import (
"bufio"
"context"
"io"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
// Object describes a union Object
//
// This is a wrapped object which returns the Union Fs as its parent
type Object struct {
*upstream.Object
fs *Fs // what this object is part of
co []upstream.Entry
}
// Directory describes a union Directory
//
// This is a wrapped object which contains all candidates
type Directory struct {
*upstream.Directory
cd []upstream.Entry
}
type entry interface {
upstream.Entry
candidates() []upstream.Entry
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() *upstream.Object {
return o.Object
}
// Fs returns the union Fs as the parent
func (o *Object) Fs() fs.Info {
return o.fs
}
func (o *Object) candidates() []upstream.Entry {
return o.co
}
func (d *Directory) candidates() []upstream.Entry {
return d.cd
}
// Update in to the object with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
entries, err := o.fs.actionEntries(o.candidates()...)
if err != nil {
return err
}
if len(entries) == 1 {
obj := entries[0].(*upstream.Object)
return obj.Update(ctx, in, src, options...)
}
// Get multiple readers
readers := make([]io.Reader, len(entries))
writers := make([]io.Writer, len(entries))
errs := Errors(make([]error, len(entries)+1))
for i := range entries {
r, w := io.Pipe()
bw := bufio.NewWriter(w)
readers[i], writers[i] = r, bw
defer func() {
err := w.Close()
if err != nil {
panic(err)
}
}()
}
go func() {
mw := io.MultiWriter(writers...)
es := make([]error, len(writers)+1)
_, es[len(es)-1] = io.Copy(mw, in)
for i, bw := range writers {
es[i] = bw.(*bufio.Writer).Flush()
}
errs[len(entries)] = Errors(es).Err()
}()
// Multi-threading
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.Update(ctx, readers[i], src, options...)
errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
} else {
errs[i] = fs.ErrorNotAFile
}
})
return errs.Err()
}
// Remove candidate objects selected by ACTION policy
func (o *Object) Remove(ctx context.Context) error {
entries, err := o.fs.actionEntries(o.candidates()...)
if err != nil {
return err
}
errs := Errors(make([]error, len(entries)))
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.Remove(ctx)
errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
} else {
errs[i] = fs.ErrorNotAFile
}
})
return errs.Err()
}
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
entries, err := o.fs.actionEntries(o.candidates()...)
if err != nil {
return err
}
var wg sync.WaitGroup
errs := Errors(make([]error, len(entries)))
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.SetModTime(ctx, t)
errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
} else {
errs[i] = fs.ErrorNotAFile
}
})
wg.Wait()
return errs.Err()
}
// ModTime returns the modification date of the directory
// It returns the latest ModTime of all candidates
func (d *Directory) ModTime(ctx context.Context) (t time.Time) {
entries := d.candidates()
times := make([]time.Time, len(entries))
multithread(len(entries), func(i int) {
times[i] = entries[i].ModTime(ctx)
})
for _, ti := range times {
if t.Before(ti) {
t = ti
}
}
return t
}
// Size returns the size of the directory
// It returns the sum of all candidates
func (d *Directory) Size() (s int64) {
for _, e := range d.candidates() {
s += e.Size()
}
return s
}

backend/union/errors.go

@@ -0,0 +1,68 @@
package union
import (
"bytes"
"fmt"
)
// The Errors type wraps a slice of errors
type Errors []error
// Map returns a copy of the error slice with all its errors modified
// according to the mapping function. If mapping returns nil,
// the error is dropped from the error slice with no replacement.
func (e Errors) Map(mapping func(error) error) Errors {
s := make([]error, len(e))
i := 0
for _, err := range e {
nerr := mapping(err)
if nerr == nil {
continue
}
s[i] = nerr
i++
}
return Errors(s[:i])
}
// FilterNil returns the Errors without nil
func (e Errors) FilterNil() Errors {
ne := e.Map(func(err error) error {
return err
})
return ne
}
// Err returns an error interface with the nil errors filtered out,
// or nil if there are no non-nil errors.
func (e Errors) Err() error {
ne := e.FilterNil()
if len(ne) == 0 {
return nil
}
return ne
}
// Error returns a concatenated string of the contained errors
func (e Errors) Error() string {
var buf bytes.Buffer
if len(e) == 0 {
buf.WriteString("no error")
} else if len(e) == 1 {
buf.WriteString("1 error: ")
} else {
fmt.Fprintf(&buf, "%d errors: ", len(e))
}
for i, err := range e {
if i != 0 {
buf.WriteString("; ")
}
buf.WriteString(err.Error())
}
return buf.String()
}
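// Editor's note (not in the original source): a minimal usage sketch of the
// Errors type, assuming the standard errors package for the example error:
//
//	errs := Errors(make([]error, 3))
//	errs[1] = errors.New("upstreamB: permission denied")
//	if err := errs.Err(); err != nil {
//		fmt.Println(err) // 1 error: upstreamB: permission denied
//	}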


@@ -0,0 +1,44 @@
package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("all", &All{})
}
// All policy behaves the same as EpAll except for the CREATE category
// Action category: same as epall.
// Create category: apply to all branches.
// Search category: same as epall.
type All struct {
EpAll
}
// Create category policy, governing the creation of files and directories
func (p *All) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
return upstreams, nil
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *All) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterNCEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries, nil
}


@@ -0,0 +1,99 @@
package policy
import (
"context"
"path"
"sync"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("epall", &EpAll{})
}
// EpAll stands for existing path, all
// Action category: apply to all found.
// Create category: apply to all found.
// Search category: same as epff.
type EpAll struct {
EpFF
}
func (p *EpAll) epall(ctx context.Context, upstreams []*upstream.Fs, filePath string) ([]*upstream.Fs, error) {
var wg sync.WaitGroup
ufs := make([]*upstream.Fs, len(upstreams))
for i, u := range upstreams {
wg.Add(1)
i, u := i, u // Closure
go func() {
rfs := u.RootFs
remote := path.Join(u.RootPath, filePath)
if findEntry(ctx, rfs, remote) != nil {
ufs[i] = u
}
wg.Done()
}()
}
wg.Wait()
var results []*upstream.Fs
for _, f := range ufs {
if f != nil {
results = append(results, f)
}
}
if len(results) == 0 {
return nil, fs.ErrorObjectNotFound
}
return results, nil
}
// Action category policy, governing the modification of files and directories
func (p *EpAll) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterRO(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
return p.epall(ctx, upstreams, path)
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpAll) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterROEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries, nil
}
// Create category policy, governing the creation of files and directories
func (p *EpAll) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
upstreams, err := p.epall(ctx, upstreams, path+"/..")
return upstreams, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpAll) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterNCEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries, nil
}


@@ -0,0 +1,115 @@
package policy
import (
"context"
"path"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("epff", &EpFF{})
}
// EpFF stands for existing path, first found
// Given the order of the candidates, act on the first one found where the relative path exists.
type EpFF struct{}
func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
ch := make(chan *upstream.Fs)
for _, u := range upstreams {
u := u // Closure
go func() {
rfs := u.RootFs
remote := path.Join(u.RootPath, filePath)
if findEntry(ctx, rfs, remote) == nil {
u = nil
}
ch <- u
}()
}
var u *upstream.Fs
for i := 0; i < len(upstreams); i++ {
u = <-ch
if u != nil {
// close remaining goroutines
go func(num int) {
defer close(ch)
for i := 0; i < num; i++ {
<-ch
}
}(len(upstreams) - 1 - i)
break
}
}
if u == nil {
return nil, fs.ErrorObjectNotFound
}
return u, nil
}
// Action category policy, governing the modification of files and directories
func (p *EpFF) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterRO(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.epff(ctx, upstreams, path)
return []*upstream.Fs{u}, err
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpFF) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterROEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries[:1], nil
}
// Create category policy, governing the creation of files and directories
func (p *EpFF) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.epff(ctx, upstreams, path+"/..")
return []*upstream.Fs{u}, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpFF) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterNCEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries[:1], nil
}
// Search category policy, governing the access to files and directories
func (p *EpFF) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.epff(ctx, upstreams, path)
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *EpFF) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return entries[0], nil
}


@@ -0,0 +1,116 @@
package policy
import (
"context"
"math"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("eplfs", &EpLfs{})
}
// EpLfs stands for existing path, least free space
// Of all the candidates on which the path exists choose the one with the least free space.
type EpLfs struct {
EpAll
}
func (p *EpLfs) lfs(upstreams []*upstream.Fs) (*upstream.Fs, error) {
var minFreeSpace int64 = math.MaxInt64
var lfsupstream *upstream.Fs
for _, u := range upstreams {
space, err := u.GetFreeSpace()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Free Space is not supported for upstream %s, treating as infinite", u.Name())
}
if space < minFreeSpace {
minFreeSpace = space
lfsupstream = u
}
}
if lfsupstream == nil {
return nil, fs.ErrorObjectNotFound
}
return lfsupstream, nil
}
func (p *EpLfs) lfsEntries(entries []upstream.Entry) (upstream.Entry, error) {
var minFreeSpace int64 = math.MaxInt64
var lfsEntry upstream.Entry
for _, e := range entries {
space, err := e.UpstreamFs().GetFreeSpace()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Free Space is not supported for upstream %s, treating as infinite", e.UpstreamFs().Name())
}
if space < minFreeSpace {
minFreeSpace = space
lfsEntry = e
}
}
return lfsEntry, nil
}
// Action category policy, governing the modification of files and directories
func (p *EpLfs) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Action(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.lfs(upstreams)
return []*upstream.Fs{u}, err
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpLfs) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.ActionEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.lfsEntries(entries)
return []upstream.Entry{e}, err
}
// Create category policy, governing the creation of files and directories
func (p *EpLfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Create(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.lfs(upstreams)
return []*upstream.Fs{u}, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpLfs) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.CreateEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.lfsEntries(entries)
return []upstream.Entry{e}, err
}
// Search category policy, governing the access to files and directories
func (p *EpLfs) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams, err := p.epall(ctx, upstreams, path)
if err != nil {
return nil, err
}
return p.lfs(upstreams)
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *EpLfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.lfsEntries(entries)
}


@@ -0,0 +1,116 @@
package policy
import (
"context"
"math"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("eplno", &EpLno{})
}
// EpLno stands for existing path, least number of objects
// Of all the candidates on which the path exists choose the one with the least number of objects
type EpLno struct {
EpAll
}
func (p *EpLno) lno(upstreams []*upstream.Fs) (*upstream.Fs, error) {
var minNumObj int64 = math.MaxInt64
var lnoUpstream *upstream.Fs
for _, u := range upstreams {
numObj, err := u.GetNumObjects()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Number of Objects is not supported for upstream %s, treating as 0", u.Name())
}
if minNumObj > numObj {
minNumObj = numObj
lnoUpstream = u
}
}
if lnoUpstream == nil {
return nil, fs.ErrorObjectNotFound
}
return lnoUpstream, nil
}
func (p *EpLno) lnoEntries(entries []upstream.Entry) (upstream.Entry, error) {
var minNumObj int64 = math.MaxInt64
var lnoEntry upstream.Entry
for _, e := range entries {
numObj, err := e.UpstreamFs().GetNumObjects()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Number of Objects is not supported for upstream %s, treating as 0", e.UpstreamFs().Name())
}
if minNumObj > numObj {
minNumObj = numObj
lnoEntry = e
}
}
return lnoEntry, nil
}
// Action category policy, governing the modification of files and directories
func (p *EpLno) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Action(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.lno(upstreams)
return []*upstream.Fs{u}, err
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpLno) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.ActionEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.lnoEntries(entries)
return []upstream.Entry{e}, err
}
// Create category policy, governing the creation of files and directories
func (p *EpLno) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Create(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.lno(upstreams)
return []*upstream.Fs{u}, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpLno) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.CreateEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.lnoEntries(entries)
return []upstream.Entry{e}, err
}
// Search category policy, governing the access to files and directories
func (p *EpLno) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams, err := p.epall(ctx, upstreams, path)
if err != nil {
return nil, err
}
return p.lno(upstreams)
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *EpLno) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.lnoEntries(entries)
}


@@ -0,0 +1,116 @@
package policy
import (
"context"
"math"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("eplus", &EpLus{})
}
// EpLus stands for existing path, least used space
// Of all the candidates on which the path exists choose the one with the least used space.
type EpLus struct {
EpAll
}
func (p *EpLus) lus(upstreams []*upstream.Fs) (*upstream.Fs, error) {
var minUsedSpace int64 = math.MaxInt64
var lusupstream *upstream.Fs
for _, u := range upstreams {
space, err := u.GetUsedSpace()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Used Space is not supported for upstream %s, treating as 0", u.Name())
}
if space < minUsedSpace {
minUsedSpace = space
lusupstream = u
}
}
if lusupstream == nil {
return nil, fs.ErrorObjectNotFound
}
return lusupstream, nil
}
func (p *EpLus) lusEntries(entries []upstream.Entry) (upstream.Entry, error) {
var minUsedSpace int64 = math.MaxInt64
var lusEntry upstream.Entry
for _, e := range entries {
space, err := e.UpstreamFs().GetUsedSpace()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Used Space is not supported for upstream %s, treating as 0", e.UpstreamFs().Name())
}
if space < minUsedSpace {
minUsedSpace = space
lusEntry = e
}
}
return lusEntry, nil
}
// Action category policy, governing the modification of files and directories
func (p *EpLus) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Action(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.lus(upstreams)
return []*upstream.Fs{u}, err
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpLus) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.ActionEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.lusEntries(entries)
return []upstream.Entry{e}, err
}
// Create category policy, governing the creation of files and directories
func (p *EpLus) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Create(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.lus(upstreams)
return []*upstream.Fs{u}, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpLus) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.CreateEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.lusEntries(entries)
return []upstream.Entry{e}, err
}
// Search category policy, governing the access to files and directories
func (p *EpLus) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams, err := p.epall(ctx, upstreams, path)
if err != nil {
return nil, err
}
return p.lus(upstreams)
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *EpLus) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.lusEntries(entries)
}


@@ -0,0 +1,115 @@
package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("epmfs", &EpMfs{})
}
// EpMfs stands for existing path, most free space
// Of all the candidates on which the path exists choose the one with the most free space.
type EpMfs struct {
EpAll
}
func (p *EpMfs) mfs(upstreams []*upstream.Fs) (*upstream.Fs, error) {
var maxFreeSpace int64
var mfsupstream *upstream.Fs
for _, u := range upstreams {
space, err := u.GetFreeSpace()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Free Space is not supported for upstream %s, treating as infinite", u.Name())
}
if maxFreeSpace < space {
maxFreeSpace = space
mfsupstream = u
}
}
if mfsupstream == nil {
return nil, fs.ErrorObjectNotFound
}
return mfsupstream, nil
}
func (p *EpMfs) mfsEntries(entries []upstream.Entry) (upstream.Entry, error) {
var maxFreeSpace int64
var mfsEntry upstream.Entry
for _, e := range entries {
space, err := e.UpstreamFs().GetFreeSpace()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Free Space is not supported for upstream %s, treating as infinite", e.UpstreamFs().Name())
}
if maxFreeSpace < space {
maxFreeSpace = space
mfsEntry = e
}
}
return mfsEntry, nil
}
// Action category policy, governing the modification of files and directories
func (p *EpMfs) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Action(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.mfs(upstreams)
return []*upstream.Fs{u}, err
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpMfs) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.ActionEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.mfsEntries(entries)
return []upstream.Entry{e}, err
}
// Create category policy, governing the creation of files and directories
func (p *EpMfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Create(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.mfs(upstreams)
return []*upstream.Fs{u}, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpMfs) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.CreateEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.mfsEntries(entries)
return []upstream.Entry{e}, err
}
// Search category policy, governing the access to files and directories
func (p *EpMfs) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams, err := p.epall(ctx, upstreams, path)
if err != nil {
return nil, err
}
return p.mfs(upstreams)
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *EpMfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.mfsEntries(entries)
}


@@ -0,0 +1,86 @@
package policy
import (
"context"
"math/rand"
"time"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("eprand", &EpRand{})
}
// EpRand stands for existing path, random
// Calls epall and then randomizes. Returns one candidate.
type EpRand struct {
EpAll
}
func (p *EpRand) rand(upstreams []*upstream.Fs) *upstream.Fs {
rand.Seed(time.Now().Unix())
return upstreams[rand.Intn(len(upstreams))]
}
func (p *EpRand) randEntries(entries []upstream.Entry) upstream.Entry {
rand.Seed(time.Now().Unix())
return entries[rand.Intn(len(entries))]
}
// Action category policy, governing the modification of files and directories
func (p *EpRand) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Action(ctx, upstreams, path)
if err != nil {
return nil, err
}
return []*upstream.Fs{p.rand(upstreams)}, nil
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpRand) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.ActionEntries(entries...)
if err != nil {
return nil, err
}
return []upstream.Entry{p.randEntries(entries)}, nil
}
// Create category policy, governing the creation of files and directories
func (p *EpRand) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Create(ctx, upstreams, path)
if err != nil {
return nil, err
}
return []*upstream.Fs{p.rand(upstreams)}, nil
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpRand) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.CreateEntries(entries...)
if err != nil {
return nil, err
}
return []upstream.Entry{p.randEntries(entries)}, nil
}
// Search category policy, governing the access to files and directories
func (p *EpRand) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams, err := p.epall(ctx, upstreams, path)
if err != nil {
return nil, err
}
return p.rand(upstreams), nil
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *EpRand) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.randEntries(entries), nil
}


@@ -0,0 +1,32 @@
package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("ff", &FF{})
}
// FF stands for first found
// Search category: same as epff.
// Action category: same as epff.
// Create category: Given the order of the candidates, act on the first one found.
type FF struct {
EpFF
}
// Create category policy, governing the creation of files and directories
func (p *FF) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return upstreams, fs.ErrorPermissionDenied
}
return upstreams[:1], nil
}


@@ -0,0 +1,33 @@
package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("lfs", &Lfs{})
}
// Lfs stands for least free space
// Search category: same as eplfs.
// Action category: same as eplfs.
// Create category: Pick the drive with the least free space.
type Lfs struct {
EpLfs
}
// Create category policy, governing the creation of files and directories
func (p *Lfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.lfs(upstreams)
return []*upstream.Fs{u}, err
}


@@ -0,0 +1,33 @@
package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("lno", &Lno{})
}
// Lno stands for least number of objects
// Search category: same as eplno.
// Action category: same as eplno.
// Create category: Pick the drive with the least number of objects.
type Lno struct {
EpLno
}
// Create category policy, governing the creation of files and directories
func (p *Lno) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.lno(upstreams)
return []*upstream.Fs{u}, err
}


@@ -0,0 +1,33 @@
package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("lus", &Lus{})
}
// Lus stands for least used space
// Search category: same as eplus.
// Action category: same as eplus.
// Create category: Pick the drive with the least used space.
type Lus struct {
EpLus
}
// Create category policy, governing the creation of files and directories
func (p *Lus) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.lus(upstreams)
return []*upstream.Fs{u}, err
}


@@ -0,0 +1,33 @@
package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("mfs", &Mfs{})
}
// Mfs stands for most free space
// Search category: same as epmfs.
// Action category: same as epmfs.
// Create category: Pick the drive with the most free space.
type Mfs struct {
EpMfs
}
// Create category policy, governing the creation of files and directories
func (p *Mfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.mfs(upstreams)
return []*upstream.Fs{u}, err
}


@@ -0,0 +1,149 @@
package policy
import (
"context"
"path"
"sync"
"time"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("newest", &Newest{})
}
// Newest policy picks the file / directory with the largest mtime
// It implies the existence of a path
type Newest struct {
EpAll
}
func (p *Newest) newest(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
var wg sync.WaitGroup
ufs := make([]*upstream.Fs, len(upstreams))
mtimes := make([]time.Time, len(upstreams))
for i, u := range upstreams {
wg.Add(1)
i, u := i, u // Closure
go func() {
defer wg.Done()
rfs := u.RootFs
remote := path.Join(u.RootPath, filePath)
if e := findEntry(ctx, rfs, remote); e != nil {
ufs[i] = u
mtimes[i] = e.ModTime(ctx)
}
}()
}
wg.Wait()
maxMtime := time.Time{}
var newestFs *upstream.Fs
for i, u := range ufs {
if u != nil && mtimes[i].After(maxMtime) {
maxMtime = mtimes[i]
newestFs = u
}
}
if newestFs == nil {
return nil, fs.ErrorObjectNotFound
}
return newestFs, nil
}
func (p *Newest) newestEntries(entries []upstream.Entry) (upstream.Entry, error) {
var wg sync.WaitGroup
mtimes := make([]time.Time, len(entries))
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
for i, e := range entries {
wg.Add(1)
i, e := i, e // Closure
go func() {
defer wg.Done()
mtimes[i] = e.ModTime(ctx)
}()
}
wg.Wait()
maxMtime := time.Time{}
var newestEntry upstream.Entry
for i, t := range mtimes {
if t.After(maxMtime) {
maxMtime = t
newestEntry = entries[i]
}
}
if newestEntry == nil {
return nil, fs.ErrorObjectNotFound
}
return newestEntry, nil
}
// Action category policy, governing the modification of files and directories
func (p *Newest) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterRO(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.newest(ctx, upstreams, path)
return []*upstream.Fs{u}, err
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *Newest) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterROEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
e, err := p.newestEntries(entries)
return []upstream.Entry{e}, err
}
// Create category policy, governing the creation of files and directories
func (p *Newest) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.newest(ctx, upstreams, path+"/..")
return []*upstream.Fs{u}, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *Newest) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterNCEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
e, err := p.newestEntries(entries)
return []upstream.Entry{e}, err
}
// Search category policy, governing the access to files and directories
func (p *Newest) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.newest(ctx, upstreams, path)
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *Newest) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.newestEntries(entries)
}


@@ -0,0 +1,129 @@
package policy
import (
"context"
"math/rand"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
var policies = make(map[string]Policy)
// Policy is the interface of a set of defined behavior choosing
// the upstream Fs to operate on
type Policy interface {
// Action category policy, governing the modification of files and directories
Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error)
// Create category policy, governing the creation of files and directories
Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error)
// Search category policy, governing the access to files and directories
Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error)
// ActionEntries is ACTION category policy but receiving a set of candidate entries
ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error)
// CreateEntries is CREATE category policy but receiving a set of candidate entries
CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error)
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
SearchEntries(entries ...upstream.Entry) (upstream.Entry, error)
}
func registerPolicy(name string, p Policy) {
policies[strings.ToLower(name)] = p
}
// Get a Policy from the list
func Get(name string) (Policy, error) {
p, ok := policies[strings.ToLower(name)]
if !ok {
return nil, errors.Errorf("didn't find policy called %q", name)
}
return p, nil
}
func filterRO(ufs []*upstream.Fs) (wufs []*upstream.Fs) {
for _, u := range ufs {
if u.IsWritable() {
wufs = append(wufs, u)
}
}
return wufs
}
func filterROEntries(ue []upstream.Entry) (wue []upstream.Entry) {
for _, e := range ue {
if e.UpstreamFs().IsWritable() {
wue = append(wue, e)
}
}
return wue
}
func filterNC(ufs []*upstream.Fs) (wufs []*upstream.Fs) {
for _, u := range ufs {
if u.IsCreatable() {
wufs = append(wufs, u)
}
}
return wufs
}
func filterNCEntries(ue []upstream.Entry) (wue []upstream.Entry) {
for _, e := range ue {
if e.UpstreamFs().IsCreatable() {
wue = append(wue, e)
}
}
return wue
}
func parentDir(absPath string) string {
parent := path.Dir(strings.TrimRight(absPath, "/"))
if parent == "." {
parent = ""
}
return parent
}
func clean(absPath string) string {
cleanPath := path.Clean(absPath)
if cleanPath == "." {
cleanPath = ""
}
return cleanPath
}
func findEntry(ctx context.Context, f fs.Fs, remote string) fs.DirEntry {
remote = clean(remote)
dir := parentDir(remote)
entries, err := f.List(ctx, dir)
if remote == dir {
if err != nil {
return nil
}
// random modtime for root
randomNow := time.Unix(time.Now().Unix()-rand.Int63n(10000), 0)
return fs.NewDir("", randomNow)
}
found := false
for _, e := range entries {
eRemote := e.Remote()
if f.Features().CaseInsensitive {
found = strings.EqualFold(remote, eRemote)
} else {
found = (remote == eRemote)
}
if found {
return e
}
}
return nil
}
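// Editor's note (not in the original source): a minimal sketch of how a policy
// is looked up and applied; identifiers such as ctx and upstreams are
// placeholders for values the union backend already holds.
//
//	p, err := policy.Get("EpLfs") // names are matched case-insensitively
//	if err != nil {
//		return err
//	}
//	chosen, err := p.Create(ctx, upstreams, "dir/file.txt")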


@@ -0,0 +1,83 @@
package policy
import (
"context"
"math/rand"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("rand", &Rand{})
}
// Rand stands for random
// Calls all and then randomizes. Returns one candidate.
type Rand struct {
All
}
func (p *Rand) rand(upstreams []*upstream.Fs) *upstream.Fs {
return upstreams[rand.Intn(len(upstreams))]
}
func (p *Rand) randEntries(entries []upstream.Entry) upstream.Entry {
return entries[rand.Intn(len(entries))]
}
// Action category policy, governing the modification of files and directories
func (p *Rand) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.All.Action(ctx, upstreams, path)
if err != nil {
return nil, err
}
return []*upstream.Fs{p.rand(upstreams)}, nil
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *Rand) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.All.ActionEntries(entries...)
if err != nil {
return nil, err
}
return []upstream.Entry{p.randEntries(entries)}, nil
}
// Create category policy, governing the creation of files and directories
func (p *Rand) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.All.Create(ctx, upstreams, path)
if err != nil {
return nil, err
}
return []*upstream.Fs{p.rand(upstreams)}, nil
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *Rand) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.All.CreateEntries(entries...)
if err != nil {
return nil, err
}
return []upstream.Entry{p.randEntries(entries)}, nil
}
// Search category policy, governing the access to files and directories
func (p *Rand) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams, err := p.epall(ctx, upstreams, path)
if err != nil {
return nil, err
}
return p.rand(upstreams), nil
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *Rand) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.randEntries(entries), nil
}


@@ -1,32 +1,57 @@
package union
import (
"bufio"
"context"
"fmt"
"io"
"path"
"path/filepath"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/union/policy"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "union",
Description: "Union merges the contents of several remotes",
Description: "Union merges the contents of several upstream fs",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remotes",
Help: "List of space separated remotes.\nCan be 'remotea:test/dir remoteb:', '\"remotea:test/space dir\" remoteb:', etc.\nThe last remote is used to write to.",
Name: "upstreams",
Help: "List of space separated upstreams.\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.\n",
Required: true,
}, {
Name: "action_policy",
Help: "Policy to choose upstream on ACTION category.",
Required: true,
Default: "epall",
}, {
Name: "create_policy",
Help: "Policy to choose upstream on CREATE category.",
Required: true,
Default: "epmfs",
}, {
Name: "search_policy",
Help: "Policy to choose upstream on SEARCH category.",
Required: true,
Default: "ff",
}, {
Name: "cache_time",
Help: "Cache time of usage and free space (in seconds). This option is only useful when a path preserving policy is used.",
Required: true,
Default: 120,
}},
}
fs.Register(fsi)
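Editor's note (not in the original source): for illustration, the options registered above correspond to a config section along these lines; the remote name and upstream paths are hypothetical, and the ":ro" suffix marks a read-only upstream as described in the help text.

[union-example]
type = union
upstreams = /mnt/disk1 /mnt/disk2:ro remote:backup
action_policy = epall
create_policy = epmfs
search_policy = ff
cache_time = 120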
@@ -34,39 +59,48 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
Remotes fs.SpaceSepList `config:"remotes"`
Upstreams fs.SpaceSepList `config:"upstreams"`
Remotes fs.SpaceSepList `config:"remotes"` // Deprecated
ActionPolicy string `config:"action_policy"`
CreatePolicy string `config:"create_policy"`
SearchPolicy string `config:"search_policy"`
CacheTime int `config:"cache_time"`
}
// Fs represents a union of remotes
// Fs represents a union of upstreams
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
root string // the path we are working on
remotes []fs.Fs // slice of remotes
wr fs.Fs // writable remote
hashSet hash.Set // intersection of hash types
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
root string // the path we are working on
upstreams []*upstream.Fs // slice of upstreams
hashSet hash.Set // intersection of hash types
actionPolicy policy.Policy // policy for ACTION
createPolicy policy.Policy // policy for CREATE
searchPolicy policy.Policy // policy for SEARCH
}
// Object describes a union Object
//
// This is a wrapped object which returns the Union Fs as its parent
type Object struct {
fs.Object
fs *Fs // what this object is part of
}
// Wrap an existing object in the union Object
func (f *Fs) wrapObject(o fs.Object) *Object {
return &Object{
Object: o,
fs: f,
// Wrap candidate objects into a union Object
func (f *Fs) wrapEntries(entries ...upstream.Entry) (entry, error) {
e, err := f.searchEntries(entries...)
if err != nil {
return nil, err
}
switch e.(type) {
case *upstream.Object:
return &Object{
Object: e.(*upstream.Object),
fs: f,
co: entries,
}, nil
case *upstream.Directory:
return &Directory{
Directory: e.(*upstream.Directory),
cd: entries,
}, nil
default:
return nil, errors.Errorf("unknown object type %T", e)
}
}
// Fs returns the union Fs as the parent
func (o *Object) Fs() fs.Info {
return o.fs
}
// Name of the remote (as passed into NewFs)
@@ -91,7 +125,16 @@ func (f *Fs) Features() *fs.Features {
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.wr.Rmdir(ctx, dir)
upstreams, err := f.action(ctx, dir)
if err != nil {
return err
}
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
err := upstreams[i].Rmdir(ctx, dir)
errs[i] = errors.Wrap(err, upstreams[i].Name())
})
return errs.Err()
}
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
@@ -101,7 +144,22 @@ func (f *Fs) Hashes() hash.Set {
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.wr.Mkdir(ctx, dir)
upstreams, err := f.create(ctx, dir)
if err == fs.ErrorObjectNotFound && dir != parentDir(dir) {
if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
return err
}
upstreams, err = f.create(ctx, dir)
}
if err != nil {
return err
}
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
err := upstreams[i].Mkdir(ctx, dir)
errs[i] = errors.Wrap(err, upstreams[i].Name())
})
return errs.Err()
}
// Purge all files in the root and the root directory
@@ -111,7 +169,21 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context) error {
return f.wr.Features().Purge(ctx)
for _, r := range f.upstreams {
if r.Features().Purge == nil {
return fs.ErrorCantPurge
}
}
upstreams, err := f.action(ctx, "")
if err != nil {
return err
}
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
err := upstreams[i].Features().Purge(ctx)
errs[i] = errors.Wrap(err, upstreams[i].Name())
})
return errs.Err()
}
// Copy src to this remote using server side copy operations.
@@ -124,15 +196,34 @@ func (f *Fs) Purge(ctx context.Context) error {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
if src.Fs() != f.wr {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
o, err := f.wr.Features().Copy(ctx, src, remote)
if err != nil {
o := srcObj.UnWrap()
su := o.UpstreamFs()
if su.Features().Copy == nil {
return nil, fs.ErrorCantCopy
}
var du *upstream.Fs
for _, u := range f.upstreams {
if operations.Same(u.RootFs, su.RootFs) {
du = u
}
}
if du == nil {
return nil, fs.ErrorCantCopy
}
if !du.IsCreatable() {
return nil, fs.ErrorPermissionDenied
}
co, err := du.Features().Copy(ctx, o, remote)
if err != nil || co == nil {
return nil, err
}
return f.wrapObject(o), nil
wo, err := f.wrapEntries(du.WrapObject(co))
return wo.(*Object), err
}
// Move src to this remote using server side move operations.
@@ -145,15 +236,57 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
if src.Fs() != f.wr {
o, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
o, err := f.wr.Features().Move(ctx, src, remote)
entries, err := f.actionEntries(o.candidates()...)
if err != nil {
return nil, err
}
return f.wrapObject(o), err
for _, e := range entries {
if e.UpstreamFs().Features().Move == nil {
return nil, fs.ErrorCantMove
}
}
objs := make([]*upstream.Object, len(entries))
errs := Errors(make([]error, len(entries)))
multithread(len(entries), func(i int) {
su := entries[i].UpstreamFs()
o, ok := entries[i].(*upstream.Object)
if !ok {
errs[i] = errors.Wrap(fs.ErrorNotAFile, su.Name())
return
}
var du *upstream.Fs
for _, u := range f.upstreams {
if operations.Same(u.RootFs, su.RootFs) {
du = u
}
}
if du == nil {
errs[i] = errors.Wrap(fs.ErrorCantMove, su.Name()+":"+remote)
return
}
mo, err := du.Features().Move(ctx, o.UnWrap(), remote)
if err != nil || mo == nil {
errs[i] = errors.Wrap(err, su.Name())
return
}
objs[i] = du.WrapObject(mo)
})
var en []upstream.Entry
for _, o := range objs {
if o != nil {
en = append(en, o)
}
}
e, err := f.wrapEntries(en...)
if err != nil {
return nil, err
}
return e.(*Object), errs.Err()
}
// DirMove moves src, srcRemote to this remote at dstRemote
@@ -165,12 +298,46 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
sfs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
fs.Debugf(src, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
return f.wr.Features().DirMove(ctx, srcFs.wr, srcRemote, dstRemote)
upstreams, err := sfs.action(ctx, srcRemote)
if err != nil {
return err
}
for _, u := range upstreams {
if u.Features().DirMove == nil {
return fs.ErrorCantDirMove
}
}
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
su := upstreams[i]
var du *upstream.Fs
for _, u := range f.upstreams {
if operations.Same(u.RootFs, su.RootFs) {
du = u
}
}
if du == nil {
errs[i] = errors.Wrap(fs.ErrorCantDirMove, su.Name()+":"+su.Root())
return
}
err := du.Features().DirMove(ctx, su.Fs, srcRemote, dstRemote)
errs[i] = errors.Wrap(err, du.Name()+":"+du.Root())
})
errs = errs.FilterNil()
if len(errs) == 0 {
return nil
}
for _, e := range errs {
if errors.Cause(e) != fs.ErrorDirExists {
return errs
}
}
return fs.ErrorDirExists
}
// ChangeNotify calls the passed function with a path
@@ -183,23 +350,23 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, fn func(string, fs.EntryType), ch <-chan time.Duration) {
var remoteChans []chan time.Duration
var uChans []chan time.Duration
for _, remote := range f.remotes {
if ChangeNotify := remote.Features().ChangeNotify; ChangeNotify != nil {
for _, u := range f.upstreams {
if ChangeNotify := u.Features().ChangeNotify; ChangeNotify != nil {
ch := make(chan time.Duration)
remoteChans = append(remoteChans, ch)
uChans = append(uChans, ch)
ChangeNotify(ctx, fn, ch)
}
}
go func() {
for i := range ch {
for _, c := range remoteChans {
for _, c := range uChans {
c <- i
}
}
for _, c := range remoteChans {
for _, c := range uChans {
close(c)
}
}()
@@ -208,10 +375,103 @@ func (f *Fs) ChangeNotify(ctx context.Context, fn func(string, fs.EntryType), ch
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
for _, remote := range f.remotes {
if DirCacheFlush := remote.Features().DirCacheFlush; DirCacheFlush != nil {
DirCacheFlush()
multithread(len(f.upstreams), func(i int) {
if do := f.upstreams[i].Features().DirCacheFlush; do != nil {
do()
}
})
}
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
upstreams, err := f.create(ctx, srcPath)
if err == fs.ErrorObjectNotFound {
if err := f.Mkdir(ctx, parentDir(srcPath)); err != nil {
return nil, err
}
upstreams, err = f.create(ctx, srcPath)
}
if err != nil {
return nil, err
}
if len(upstreams) == 1 {
u := upstreams[0]
var o fs.Object
var err error
if stream {
o, err = u.Features().PutStream(ctx, in, src, options...)
} else {
o, err = u.Put(ctx, in, src, options...)
}
if err != nil {
return nil, err
}
e, err := f.wrapEntries(u.WrapObject(o))
return e.(*Object), err
}
errs := Errors(make([]error, len(upstreams)+1))
// Get multiple readers
readers := make([]io.Reader, len(upstreams))
writers := make([]io.Writer, len(upstreams))
for i := range writers {
r, w := io.Pipe()
bw := bufio.NewWriter(w)
readers[i], writers[i] = r, bw
defer func() {
err := w.Close()
if err != nil {
panic(err)
}
}()
}
go func() {
mw := io.MultiWriter(writers...)
es := make([]error, len(writers)+1)
_, es[len(es)-1] = io.Copy(mw, in)
for i, bw := range writers {
es[i] = bw.(*bufio.Writer).Flush()
}
errs[len(upstreams)] = Errors(es).Err()
}()
// Multi-threading
objs := make([]upstream.Entry, len(upstreams))
multithread(len(upstreams), func(i int) {
u := upstreams[i]
var o fs.Object
var err error
if stream {
o, err = u.Features().PutStream(ctx, readers[i], src, options...)
} else {
o, err = u.Put(ctx, readers[i], src, options...)
}
if err != nil {
errs[i] = errors.Wrap(err, u.Name())
return
}
objs[i] = u.WrapObject(o)
})
err = errs.Err()
if err != nil {
return nil, err
}
e, err := f.wrapEntries(objs...)
return e.(*Object), err
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, false, options...)
default:
return nil, err
}
}
@@ -221,29 +481,64 @@ func (f *Fs) DirCacheFlush() {
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.wr.Features().PutStream(ctx, in, src, options...)
if err != nil {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, true, options...)
default:
return nil, err
}
return f.wrapObject(o), err
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return f.wr.Features().About(ctx)
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.wr.Put(ctx, in, src, options...)
if err != nil {
return nil, err
usage := &fs.Usage{
Total: new(int64),
Used: new(int64),
Trashed: new(int64),
Other: new(int64),
Free: new(int64),
Objects: new(int64),
}
return f.wrapObject(o), err
for _, u := range f.upstreams {
usg, err := u.About(ctx)
if err != nil {
return nil, err
}
if usg.Total != nil && usage.Total != nil {
*usage.Total += *usg.Total
} else {
usage.Total = nil
}
if usg.Used != nil && usage.Used != nil {
*usage.Used += *usg.Used
} else {
usage.Used = nil
}
if usg.Trashed != nil && usage.Trashed != nil {
*usage.Trashed += *usg.Trashed
} else {
usage.Trashed = nil
}
if usg.Other != nil && usage.Other != nil {
*usage.Other += *usg.Other
} else {
usage.Other = nil
}
if usg.Free != nil && usage.Free != nil {
*usage.Free += *usg.Free
} else {
usage.Free = nil
}
if usg.Objects != nil && usage.Objects != nil {
*usage.Objects += *usg.Objects
} else {
usage.Objects = nil
}
}
return usage, nil
}
// List the objects and directories in dir into entries. The
@@ -256,60 +551,188 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
set := make(map[string]fs.DirEntry)
found := false
for _, remote := range f.remotes {
var remoteEntries, err = remote.List(ctx, dir)
if err == fs.ErrorDirNotFound {
continue
}
entriess := make([][]upstream.Entry, len(f.upstreams))
errs := Errors(make([]error, len(f.upstreams)))
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
entries, err := u.List(ctx, dir)
if err != nil {
return nil, errors.Wrapf(err, "List failed on %v", remote)
errs[i] = errors.Wrap(err, u.Name())
return
}
found = true
for _, remoteEntry := range remoteEntries {
set[remoteEntry.Remote()] = remoteEntry
uEntries := make([]upstream.Entry, len(entries))
for j, e := range entries {
uEntries[j], _ = u.WrapEntry(e)
}
}
if !found {
return nil, fs.ErrorDirNotFound
}
for _, entry := range set {
if o, ok := entry.(fs.Object); ok {
entry = f.wrapObject(o)
entriess[i] = uEntries
})
if len(errs) == len(errs.FilterNil()) {
errs = errs.Map(func(e error) error {
if errors.Cause(e) == fs.ErrorDirNotFound {
return nil
}
return e
})
if len(errs) == 0 {
return nil, fs.ErrorDirNotFound
}
entries = append(entries, entry)
return nil, errs.Err()
}
return entries, nil
return f.mergeDirEntries(entriess)
}
// NewObject creates a new remote union file object based on the first Object it finds (reverse remote order)
func (f *Fs) NewObject(ctx context.Context, path string) (fs.Object, error) {
for i := range f.remotes {
var remote = f.remotes[len(f.remotes)-i-1]
var obj, err = remote.NewObject(ctx, path)
if err == fs.ErrorObjectNotFound {
continue
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
var entriess [][]upstream.Entry
errs := Errors(make([]error, len(f.upstreams)))
var mutex sync.Mutex
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
var err error
callback := func(entries fs.DirEntries) error {
uEntries := make([]upstream.Entry, len(entries))
for j, e := range entries {
uEntries[j], _ = u.WrapEntry(e)
}
mutex.Lock()
entriess = append(entriess, uEntries)
mutex.Unlock()
return nil
}
do := u.Features().ListR
if do != nil {
err = do(ctx, dir, callback)
} else {
err = walk.ListR(ctx, u, dir, true, -1, walk.ListAll, callback)
}
		if err != nil {
			errs[i] = errors.Wrap(err, u.Name())
			return
		}
	})
if len(errs) == len(errs.FilterNil()) {
errs = errs.Map(func(e error) error {
if errors.Cause(e) == fs.ErrorDirNotFound {
return nil
}
return e
})
if len(errs) == 0 {
return fs.ErrorDirNotFound
}
return errs.Err()
}
	entries, err := f.mergeDirEntries(entriess)
if err != nil {
return err
}
return callback(entries)
}
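// A hypothetical usage sketch for ListR: with this implementation the
// callback is invoked once with the merged entries, and returning an error
// from the callback aborts the listing.
//
//	var objects int
//	err := f.ListR(ctx, "", func(entries fs.DirEntries) error {
//		for _, e := range entries {
//			if _, ok := e.(fs.Object); ok {
//				objects++
//			}
//		}
//		return nil
//	})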
// NewObject creates a new remote union file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
objs := make([]*upstream.Object, len(f.upstreams))
errs := Errors(make([]error, len(f.upstreams)))
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
o, err := u.NewObject(ctx, remote)
if err != nil && err != fs.ErrorObjectNotFound {
errs[i] = errors.Wrap(err, u.Name())
return
}
objs[i] = u.WrapObject(o)
})
var entries []upstream.Entry
for _, o := range objs {
if o != nil {
entries = append(entries, o)
}
}
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
e, err := f.wrapEntries(entries...)
if err != nil {
return nil, err
}
return e.(*Object), errs.Err()
}
// Precision is the greatest Precision of all upstreams
func (f *Fs) Precision() time.Duration {
var greatestPrecision time.Duration
	for _, u := range f.upstreams {
		if u.Precision() > greatestPrecision {
			greatestPrecision = u.Precision()
}
}
return greatestPrecision
}
func (f *Fs) action(ctx context.Context, path string) ([]*upstream.Fs, error) {
return f.actionPolicy.Action(ctx, f.upstreams, path)
}
func (f *Fs) actionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
return f.actionPolicy.ActionEntries(entries...)
}
func (f *Fs) create(ctx context.Context, path string) ([]*upstream.Fs, error) {
return f.createPolicy.Create(ctx, f.upstreams, path)
}
func (f *Fs) createEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
return f.createPolicy.CreateEntries(entries...)
}
func (f *Fs) search(ctx context.Context, path string) (*upstream.Fs, error) {
return f.searchPolicy.Search(ctx, f.upstreams, path)
}
func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
return f.searchPolicy.SearchEntries(entries...)
}
func (f *Fs) mergeDirEntries(entriess [][]upstream.Entry) (fs.DirEntries, error) {
entryMap := make(map[string]([]upstream.Entry))
for _, en := range entriess {
if en == nil {
continue
}
for _, entry := range en {
remote := entry.Remote()
if f.Features().CaseInsensitive {
remote = strings.ToLower(remote)
}
entryMap[remote] = append(entryMap[remote], entry)
}
}
var entries fs.DirEntries
for path := range entryMap {
e, err := f.wrapEntries(entryMap[path]...)
if err != nil {
return nil, err
}
entries = append(entries, e)
}
return entries, nil
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
@@ -320,51 +743,64 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
	// Backward compatible to old config
	if len(opt.Upstreams) == 0 && len(opt.Remotes) > 0 {
		for i := 0; i < len(opt.Remotes)-1; i++ {
			opt.Remotes[i] = opt.Remotes[i] + ":ro"
		}
		opt.Upstreams = opt.Remotes
	}
	if len(opt.Upstreams) == 0 {
		return nil, errors.New("union can't point to an empty upstream - check the value of the upstreams setting")
	}
	if len(opt.Upstreams) == 1 {
		return nil, errors.New("union can't point to a single upstream - check the value of the upstreams setting")
	}
	for _, u := range opt.Upstreams {
		if strings.HasPrefix(u, name+":") {
			return nil, errors.New("can't point union remote at itself - check the value of the upstreams setting")
		}
	}
	upstreams := make([]*upstream.Fs, len(opt.Upstreams))
	errs := Errors(make([]error, len(opt.Upstreams)))
	multithread(len(opt.Upstreams), func(i int) {
		u := opt.Upstreams[i]
		upstreams[i], errs[i] = upstream.New(u, root, time.Duration(opt.CacheTime)*time.Second)
	})
	var usedUpstreams []*upstream.Fs
	var fserr error
	for i, err := range errs {
		if err != nil && err != fs.ErrorIsFile {
			return nil, err
		}
		// Only the upstreams that return ErrorIsFile will be used, if any
		if err == fs.ErrorIsFile {
			usedUpstreams = append(usedUpstreams, upstreams[i])
			fserr = fs.ErrorIsFile
		}
	}
	if fserr == nil {
		usedUpstreams = upstreams
	}
	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		upstreams: usedUpstreams,
	}
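	// A hedged illustration of the backward-compatibility mapping above: an
	// old style config of
	//   remotes = ["a:data", "b:data", "c:data"]
	// becomes
	//   upstreams = ["a:data:ro", "b:data:ro", "c:data"]
	// so every upstream except the last is read-only, preserving the old
	// "last remote is writable" behaviour.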
f.actionPolicy, err = policy.Get(opt.ActionPolicy)
if err != nil {
return nil, err
}
f.createPolicy, err = policy.Get(opt.CreatePolicy)
if err != nil {
return nil, err
}
f.searchPolicy, err = policy.Get(opt.SearchPolicy)
if err != nil {
return nil, err
}
var features = (&fs.Features{
CaseInsensitive: true,
@@ -376,44 +812,54 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
SetTier: true,
GetTier: true,
}).Fill(f)
	for _, f := range upstreams {
		features = features.Mask(f) // Mask all upstream fs
	}
	// Enable ListR when upstreams either support ListR or are local,
	// but not when all upstreams are local
	if features.ListR == nil {
		for _, u := range upstreams {
			if u.Features().ListR != nil {
				features.ListR = f.ListR
			} else if !u.Features().IsLocal {
				features.ListR = nil
				break
			}
		}
	}
f.features = features
// Get common intersection of hashes
	hashSet := f.upstreams[0].Hashes()
	for _, u := range f.upstreams[1:] {
		hashSet = hashSet.Overlap(u.Hashes())
}
f.hashSet = hashSet
	return f, fserr
}
func parentDir(absPath string) string {
parent := path.Dir(strings.TrimRight(filepath.ToSlash(absPath), "/"))
if parent == "." {
parent = ""
}
return parent
}
func multithread(num int, fn func(int)) {
var wg sync.WaitGroup
for i := 0; i < num; i++ {
wg.Add(1)
i := i
go func() {
defer wg.Done()
fn(i)
}()
}
wg.Wait()
}
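// A hypothetical usage sketch for multithread: the index i identifies the
// upstream, so each goroutine writes to its own slot and no extra locking is
// needed.
//
//	precisions := make([]time.Duration, len(f.upstreams))
//	multithread(len(f.upstreams), func(i int) {
//		precisions[i] = f.upstreams[i].Precision()
//	})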
// Check the interfaces are satisfied
@@ -427,4 +873,5 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
)


@@ -2,17 +2,154 @@
package union_test
import (
"os"
"path/filepath"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/require"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
		RemoteName:                   *fstest.RemoteName,
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
})
}
func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-standard1")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-standard2")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-standard3")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnion"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "epall"},
{Name: name, Key: "create_policy", Value: "epmfs"},
{Name: name, Key: "search_policy", Value: "ff"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
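// The ExtraConfig items above correspond roughly to this hypothetical
// rclone.conf section (paths shortened for illustration):
//
//	[TestUnion]
//	type = union
//	upstreams = /tmp/union1 /tmp/union2 /tmp/union3
//	action_policy = epall
//	create_policy = epmfs
//	search_policy = ff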
func TestRO(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-ro1")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-ro2")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-ro3")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + ":ro " + tempdir3 + ":ro"
name := "TestUnionRO"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "epall"},
{Name: name, Key: "create_policy", Value: "epmfs"},
{Name: name, Key: "search_policy", Value: "ff"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
func TestNC(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-nc1")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-nc2")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-nc3")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + ":nc " + tempdir3 + ":nc"
name := "TestUnionNC"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "epall"},
{Name: name, Key: "create_policy", Value: "epmfs"},
{Name: name, Key: "search_policy", Value: "ff"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
func TestPolicy1(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy11")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy12")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy13")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnionPolicy1"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "all"},
{Name: name, Key: "create_policy", Value: "lus"},
{Name: name, Key: "search_policy", Value: "all"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
func TestPolicy2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy21")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy22")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy23")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnionPolicy2"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "all"},
{Name: name, Key: "create_policy", Value: "rand"},
{Name: name, Key: "search_policy", Value: "ff"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}


@@ -0,0 +1,348 @@
package upstream
import (
"context"
"io"
"math"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
)
var (
	// ErrUsageFieldNotSupported states that the usage field is not supported by the backend
ErrUsageFieldNotSupported = errors.New("this usage field is not supported")
)
// Fs is a wrap of any fs and its configs
type Fs struct {
fs.Fs
RootFs fs.Fs
RootPath string
writable bool
creatable bool
usage *fs.Usage // Cache the usage
cacheTime time.Duration // cache duration
cacheExpiry int64 // usage cache expiry time
cacheMutex sync.RWMutex
cacheOnce sync.Once
cacheUpdate bool // if the cache is updating
}
// Directory describes a wrapped Directory
//
// This is a wrapped Directory which contains the upstream Fs
type Directory struct {
fs.Directory
f *Fs
}
// Object describes a wrapped Object
//
// This is a wrapped Object which contains the upstream Fs
type Object struct {
fs.Object
f *Fs
}
// Entry describes a wrapped fs.DirEntry interface with the
// information of the upstream Fs
type Entry interface {
fs.DirEntry
UpstreamFs() *Fs
}
// New creates a new Fs based on the
// string formatted `type:root_path(:ro/:nc)`
func New(remote, root string, cacheTime time.Duration) (*Fs, error) {
_, configName, fsPath, err := fs.ParseRemote(remote)
if err != nil {
return nil, err
}
f := &Fs{
RootPath: root,
writable: true,
creatable: true,
cacheExpiry: time.Now().Unix(),
cacheTime: cacheTime,
usage: &fs.Usage{},
}
if strings.HasSuffix(fsPath, ":ro") {
f.writable = false
f.creatable = false
fsPath = fsPath[0 : len(fsPath)-3]
} else if strings.HasSuffix(fsPath, ":nc") {
f.writable = true
f.creatable = false
fsPath = fsPath[0 : len(fsPath)-3]
}
if configName != "local" {
fsPath = configName + ":" + fsPath
}
rFs, err := cache.Get(fsPath)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.RootFs = rFs
rootString := path.Join(fsPath, filepath.ToSlash(root))
myFs, err := cache.Get(rootString)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.Fs = myFs
return f, err
}
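// A hypothetical usage sketch for New: the ":ro" suffix marks the upstream
// read-only and ":nc" marks it writable but not creatable, as parsed above.
//
//	u, err := New("remote:bucket/dir:ro", "", time.Minute)
//	if err == nil && !u.IsWritable() {
//		// read-only upstream
//	}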
// WrapDirectory wraps a fs.Directory to include the info
// of the upstream Fs
func (f *Fs) WrapDirectory(e fs.Directory) *Directory {
if e == nil {
return nil
}
return &Directory{
Directory: e,
f: f,
}
}
// WrapObject wraps a fs.Object to include the info
// of the upstream Fs
func (f *Fs) WrapObject(o fs.Object) *Object {
if o == nil {
return nil
}
return &Object{
Object: o,
f: f,
}
}
// WrapEntry wraps a fs.DirEntry to include the info
// of the upstream Fs
func (f *Fs) WrapEntry(e fs.DirEntry) (Entry, error) {
switch e.(type) {
case fs.Object:
return f.WrapObject(e.(fs.Object)), nil
case fs.Directory:
return f.WrapDirectory(e.(fs.Directory)), nil
default:
return nil, errors.Errorf("unknown object type %T", e)
}
}
// UpstreamFs get the upstream Fs the entry is stored in
func (e *Directory) UpstreamFs() *Fs {
return e.f
}
// UpstreamFs get the upstream Fs the entry is stored in
func (o *Object) UpstreamFs() *Fs {
return o.f
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
return o.Object
}
// IsCreatable returns whether the fs is allowed to create new objects
func (f *Fs) IsCreatable() bool {
return f.creatable
}
// IsWritable returns whether the fs is allowed to write
func (f *Fs) IsWritable() bool {
return f.writable
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.Fs.Put(ctx, in, src, options...)
if err != nil {
return o, err
}
f.cacheMutex.Lock()
defer f.cacheMutex.Unlock()
size := src.Size()
if f.usage.Used != nil {
*f.usage.Used += size
}
if f.usage.Free != nil {
*f.usage.Free -= size
}
if f.usage.Objects != nil {
*f.usage.Objects++
}
return o, nil
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Features().PutStream
if do == nil {
return nil, fs.ErrorNotImplemented
}
o, err := do(ctx, in, src, options...)
if err != nil {
return o, err
}
f.cacheMutex.Lock()
defer f.cacheMutex.Unlock()
size := o.Size()
if f.usage.Used != nil {
*f.usage.Used += size
}
if f.usage.Free != nil {
*f.usage.Free -= size
}
if f.usage.Objects != nil {
*f.usage.Objects++
}
return o, nil
}
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
size := o.Size()
err := o.Object.Update(ctx, in, src, options...)
if err != nil {
return err
}
o.f.cacheMutex.Lock()
defer o.f.cacheMutex.Unlock()
delta := o.Size() - size
if delta <= 0 {
return nil
}
if o.f.usage.Used != nil {
*o.f.usage.Used += size
}
if o.f.usage.Free != nil {
*o.f.usage.Free -= size
}
return nil
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return nil, ErrUsageFieldNotSupported
}
}
f.cacheMutex.RLock()
defer f.cacheMutex.RUnlock()
return f.usage, nil
}
// GetFreeSpace get the free space of the fs
func (f *Fs) GetFreeSpace() (int64, error) {
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return math.MaxInt64, ErrUsageFieldNotSupported
}
}
f.cacheMutex.RLock()
defer f.cacheMutex.RUnlock()
if f.usage.Free == nil {
return math.MaxInt64, ErrUsageFieldNotSupported
}
return *f.usage.Free, nil
}
// GetUsedSpace get the used space of the fs
func (f *Fs) GetUsedSpace() (int64, error) {
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return 0, ErrUsageFieldNotSupported
}
}
f.cacheMutex.RLock()
defer f.cacheMutex.RUnlock()
if f.usage.Used == nil {
return 0, ErrUsageFieldNotSupported
}
return *f.usage.Used, nil
}
// GetNumObjects get the number of objects of the fs
func (f *Fs) GetNumObjects() (int64, error) {
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return 0, ErrUsageFieldNotSupported
}
}
f.cacheMutex.RLock()
defer f.cacheMutex.RUnlock()
if f.usage.Objects == nil {
return 0, ErrUsageFieldNotSupported
}
return *f.usage.Objects, nil
}
func (f *Fs) updateUsage() (err error) {
if do := f.RootFs.Features().About; do == nil {
return ErrUsageFieldNotSupported
}
done := false
f.cacheOnce.Do(func() {
f.cacheMutex.Lock()
err = f.updateUsageCore(false)
f.cacheMutex.Unlock()
done = true
})
if done {
return err
}
if !f.cacheUpdate {
f.cacheUpdate = true
go func() {
_ = f.updateUsageCore(true)
f.cacheUpdate = false
}()
}
return nil
}
func (f *Fs) updateUsageCore(lock bool) error {
// Run in background, should not be cancelled by user
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
usage, err := f.RootFs.Features().About(ctx)
if err != nil {
f.cacheUpdate = false
return err
}
if lock {
f.cacheMutex.Lock()
defer f.cacheMutex.Unlock()
}
// Store usage
atomic.StoreInt64(&f.cacheExpiry, time.Now().Add(f.cacheTime).Unix())
f.usage = usage
return nil
}
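// A hypothetical sketch of how a caller such as a create policy might use the
// cached numbers (u is assumed to be an *Fs), eg to skip upstreams reporting
// less than about 10 GiB free:
//
//	if free, err := u.GetFreeSpace(); err == nil && free < 10<<30 {
//		// skip this upstream
//	}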


@@ -989,13 +989,14 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return nil, errors.Wrap(err, "about call failed")
}
usage := &fs.Usage{}
	if q.Available >= 0 && q.Used >= 0 {
		usage.Total = fs.NewUsageValue(q.Available + q.Used)
	}
	if q.Used >= 0 {
		usage.Used = fs.NewUsageValue(q.Used)
	}
	if q.Available >= 0 {
		usage.Free = fs.NewUsageValue(q.Available)
	}
return usage, nil
}
@@ -1134,6 +1135,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
NoResponse: true,
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: fs.MimeType(ctx, src),
Options: options,
}
if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
opts.ExtraHeaders = map[string]string{}


@@ -1065,7 +1065,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, err
}
func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeType string, options ...fs.OpenOption) (err error) {
// prepare upload
var resp *http.Response
var ur api.AsyncInfo
@@ -1073,6 +1073,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT
Method: "GET",
Path: "/resources/upload",
Parameters: url.Values{},
Options: options,
}
opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))
@@ -1121,7 +1122,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
//upload file
	err = o.upload(ctx, in1, true, fs.MimeType(ctx, src), options...)
if err != nil {
return err
}

bin/.ignore-emails Normal file

@@ -0,0 +1,3 @@
# Email addresses to ignore in the git log when making the authors.md file
<nick@raig-wood.com>
<anaghk.dos@gmail.com>

bin/check-merged.go Normal file

@@ -0,0 +1,136 @@
// +build ignore
// Attempt to work out if branches have already been merged
package main
import (
"bufio"
"errors"
"flag"
"fmt"
"log"
"os"
"os/exec"
"regexp"
)
var (
// Flags
master = flag.String("master", "master", "Branch to work out if merged into")
version = "development version" // overridden by goreleaser
)
func usage() {
fmt.Fprintf(os.Stderr, `Usage: %s [options]
Version: %s
Attempt to work out whether branches in the current git repo have been
merged into master.
Example usage:
%s
Full options:
`, os.Args[0], version, os.Args[0])
flag.PrintDefaults()
}
var (
printedSep = false
)
const (
sep1 = "============================================================"
sep2 = "------------------------------------------------------------"
)
// Show the diff between two git revisions
func gitDiffDiff(rev1, rev2 string) {
fmt.Printf("Diff of diffs of %q and %q\n", rev1, rev2)
cmd := exec.Command("bash", "-c", fmt.Sprintf(`diff <(git show "%s") <(git show "%s")`, rev1, rev2))
out, err := cmd.Output()
if err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) && exitErr.ExitCode() == 1 {
// OK just different
} else {
log.Fatalf("git diff diff failed: %#v", err)
}
}
_, _ = os.Stdout.Write(out)
}
var reCommit = regexp.MustCompile(`commit ([0-9a-f]{32,})`)
// Grep the git log for logLine
func gitLogGrep(branch, rev, logLine string) {
cmd := exec.Command("git", "log", "--grep", regexp.QuoteMeta(logLine), *master)
out, err := cmd.Output()
if err != nil {
log.Fatalf("git log grep failed: %v", err)
}
if len(out) > 0 {
if !printedSep {
fmt.Println(sep1)
printedSep = true
}
fmt.Printf("Branch: %s - MAY BE MERGED to %s\nLog: %s\n\n", branch, *master, logLine)
_, _ = os.Stdout.Write(out)
match := reCommit.FindSubmatch(out)
if len(match) != 0 {
commit := string(match[1])
fmt.Println(sep2)
gitDiffDiff(rev, commit)
}
fmt.Println(sep1)
}
}
// * b2-fix-download-url 4209c768a [gone] b2: fix transfers when using download_url
var reLine = regexp.MustCompile(`^[ *] (\S+)\s+([0-9a-f]+)\s+(?:\[[^]]+\] )?(.*)$`)
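// A hypothetical check of reLine against the sample line in the comment
// above:
//
//	m := reLine.FindStringSubmatch("* b2-fix-download-url 4209c768a [gone] b2: fix transfers when using download_url")
//	// m[1] == "b2-fix-download-url"
//	// m[2] == "4209c768a"
//	// m[3] == "b2: fix transfers when using download_url"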
// Run git branch -v, parse the output and check it in the log
func gitBranch() {
cmd := exec.Command("git", "branch", "-v")
cmd.Stderr = os.Stderr
out, err := cmd.StdoutPipe()
if err != nil {
log.Fatalf("git branch pipe failed: %v", err)
}
if err := cmd.Start(); err != nil {
log.Fatalf("git branch failed: %v", err)
}
scanner := bufio.NewScanner(out)
for scanner.Scan() {
line := scanner.Text()
match := reLine.FindStringSubmatch(line)
if len(match) != 4 {
log.Printf("Invalid line %q", line)
continue
}
branch, rev, logLine := match[1], match[2], match[3]
if branch == *master {
continue
}
//fmt.Printf("branch = %q, rev = %q, logLine = %q\n", branch, rev, logLine)
gitLogGrep(branch, rev, logLine)
}
if err := scanner.Err(); err != nil {
log.Fatalf("failed reading git branch: %v", err)
}
if err := cmd.Wait(); err != nil {
log.Fatalf("git branch wait failed: %v", err)
}
}
func main() {
flag.Usage = usage
flag.Parse()
args := flag.Args()
if len(args) != 0 {
usage()
log.Fatal("Wrong number of arguments")
}
gitBranch()
}


@@ -182,6 +182,7 @@ func compileArch(version, goos, goarch, dir string) bool {
args := []string{
"go", "build",
"--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version,
"-trimpath",
"-i",
"-o", output,
"-tags", *tags,


@@ -4,6 +4,7 @@ Make backend documentation
"""
import os
import io
import subprocess
marker = "<!--- autogenerated options"
@@ -19,6 +20,11 @@ def output_docs(backend, out):
out.flush()
subprocess.check_call(["rclone", "help", "backend", backend], stdout=out)
def output_backend_tool_docs(backend, out):
"""Output documentation for backend tool to out"""
out.flush()
subprocess.call(["rclone", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)
def alter_doc(backend):
"""Alter the documentation for backend"""
doc_file = "docs/content/"+backend+".md"
@@ -35,6 +41,7 @@ def alter_doc(backend):
start_full = start + " - DO NOT EDIT, instead edit fs.RegInfo in backend/%s/%s.go then run make backenddocs -->\n" % (backend, backend)
out_file.write(start_full)
output_docs(backend, out_file)
output_backend_tool_docs(backend, out_file)
out_file.write(stop+" -->\n")
altered = True
if not in_docs:


@@ -54,8 +54,10 @@ docs = [
"pcloud.md",
"premiumizeme.md",
"putio.md",
"seafile.md",
"sftp.md",
"sugarsync.md",
"tardigrade.md",
"union.md",
"webdav.md",
"yandex.md",
@@ -110,7 +112,7 @@ def read_doc(doc):
# Remove icons
contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
# Make [...](/links/) absolute
    contents = re.sub(r'\]\((\/.*?\/(#.*)?)\)', r"](https://rclone.org\1)", contents)
# Interpret provider shortcode
# {{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
contents = re.sub(r'\{\{<\s+provider.*?name="(.*?)".*?>\}\}', r"\1", contents)


@@ -7,17 +7,15 @@ import re
import subprocess
AUTHORS = "docs/content/authors.md"
IGNORE = "bin/.ignore-emails"
def load(filename):
    """
    returns a set of emails already in the file
    """
    with open(filename) as fd:
        authors = fd.read()
    return set(re.findall(r"<(.*?)>", authors))
def add_email(name, email):
"""
@@ -32,7 +30,9 @@ def main():
out = subprocess.check_output(["git", "log", '--reverse', '--format=%an|%ae', "master"])
out = out.decode("utf-8")
    ignored = load(IGNORE)
    previous = load(AUTHORS)
    previous.update(ignored)
for line in out.split("\n"):
line = line.strip()
if line == "":


@@ -6,6 +6,7 @@ import (
_ "github.com/rclone/rclone/cmd"
_ "github.com/rclone/rclone/cmd/about"
_ "github.com/rclone/rclone/cmd/authorize"
_ "github.com/rclone/rclone/cmd/backend"
_ "github.com/rclone/rclone/cmd/cachestats"
_ "github.com/rclone/rclone/cmd/cat"
_ "github.com/rclone/rclone/cmd/check"

cmd/backend/backend.go Normal file

@@ -0,0 +1,179 @@
package backend
import (
"context"
"encoding/json"
"fmt"
"os"
"sort"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/rc"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
var (
options []string
useJSON bool
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name.")
flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format.")
}
var commandDefinition = &cobra.Command{
Use: "backend <command> remote:path [opts] <args>",
Short: `Run a backend specific command.`,
Long: `
This runs a backend specific command. The commands themselves (except
for "help" and "features") are defined by the backends and you should
see the backend docs for definitions.
You can discover what commands a backend implements by using
rclone backend help remote:
rclone backend help <backendname>
You can also discover information about the backend using (see
[operations/fsinfo](/rc/#operations/fsinfo) in the remote control docs
for more info).
rclone backend features remote:
Pass options to the backend command with -o. This should be key=value or key, eg:
rclone backend stats remote:path stats -o format=json -o long
Pass arguments to the backend by placing them on the end of the line
rclone backend cleanup remote:path file1 file2 file3
Note to run these commands on a running backend then see
[backend/command](/rc/#backend/command) in the rc docs.
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 1E6, command, args)
name, remote := args[0], args[1]
cmd.Run(false, false, command, func() error {
// show help if remote is a backend name
if name == "help" {
fsInfo, err := fs.Find(remote)
if err == nil {
return showHelp(fsInfo)
}
}
// Create remote
fsInfo, configName, fsPath, config, err := fs.ConfigFs(remote)
if err != nil {
return err
}
f, err := fsInfo.NewFs(configName, fsPath, config)
if err != nil {
return err
}
// Run the command
var out interface{}
switch name {
case "help":
return showHelp(fsInfo)
case "features":
out = operations.GetFsInfo(f)
default:
doCommand := f.Features().Command
if doCommand == nil {
return errors.Errorf("%v: doesn't support backend commands", f)
}
arg := args[2:]
opt := rc.ParseOptions(options)
out, err = doCommand(context.Background(), name, arg, opt)
}
if err != nil {
return errors.Wrapf(err, "command %q failed", name)
}
// Output the result
writeJSON := false
if useJSON {
writeJSON = true
} else {
switch x := out.(type) {
case nil:
case string:
fmt.Println(out)
case []string:
for _, line := range x {
fmt.Println(line)
}
default:
writeJSON = true
}
}
if writeJSON {
// Write indented JSON to the output
enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", "\t")
err = enc.Encode(out)
if err != nil {
return errors.Wrap(err, "failed to write JSON")
}
}
return nil
})
return nil
},
}
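// A hedged sketch (names invented for illustration) of the backend-side
// handler this command dispatches to via f.Features().Command, assuming the
// name/arg/opt signature used above:
//
//	func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
//		switch name {
//		case "echo":
//			return map[string]interface{}{"args": arg, "opts": opt}, nil
//		default:
//			return nil, errors.Errorf("unknown command %q", name)
//		}
//	}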
// show help for a backend
func showHelp(fsInfo *fs.RegInfo) error {
cmds := fsInfo.CommandHelp
name := fsInfo.Name
if len(cmds) == 0 {
return errors.Errorf("%s backend has no commands", name)
}
fmt.Printf("### Backend commands\n\n")
fmt.Printf(`Here are the commands specific to the %s backend.
Run them with
rclone backend COMMAND remote:
The help below will explain what arguments each command takes.
See [the "rclone backend" command](/commands/rclone_backend/) for more
info on how to pass options and arguments.
These can be run on a running backend using the rc command
[backend/command](/rc/#backend/command).
`, name)
for _, cmd := range cmds {
fmt.Printf("#### %s\n\n", cmd.Name)
fmt.Printf("%s\n\n", cmd.Short)
fmt.Printf(" rclone backend %s remote: [options] [<arguments>+]\n\n", cmd.Name)
if cmd.Long != "" {
fmt.Printf("%s\n\n", cmd.Long)
}
if len(cmd.Opts) != 0 {
fmt.Printf("Options:\n\n")
ks := []string{}
for k := range cmd.Opts {
ks = append(ks, k)
}
sort.Strings(ks)
for _, k := range ks {
v := cmd.Opts[k]
fmt.Printf("- %q: %s\n", k, v)
}
fmt.Printf("\n")
}
}
return nil
}
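// A hedged sketch of the data showHelp consumes, using only the fields
// referenced above (Name, Short, Long, Opts):
//
//	var exampleCommandHelp = []fs.CommandHelp{{
//		Name:  "stats",
//		Short: "Print stats on the backend",
//		Opts:  map[string]string{"format": "output format, eg json"},
//	}}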


@@ -9,6 +9,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/spf13/cobra"
)
@@ -22,8 +23,10 @@ var commandDefinition = &cobra.Command{
Long: `
Print cache stats for a remote in JSON format
`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fs.Logf(nil, `"rclone cachestats" is deprecated, use "rclone backend stats %s" instead`, args[0])
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {


@@ -66,6 +66,7 @@ const (
exitCodeNoRetryError
exitCodeFatalError
exitCodeTransferExceeded
exitCodeNoFilesTransferred
)
// ShowVersion prints the version to stdout
@@ -312,6 +313,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
}
}
resolveExitCode(cmdErr)
}
// CheckArgs checks there are enough arguments and prints a message if not
@@ -430,6 +432,11 @@ func initConfig() {
func resolveExitCode(err error) {
atexit.Run()
if err == nil {
if fs.Config.ErrorOnNoTransfer {
if accounting.GlobalStats().GetTransfers() == 0 {
os.Exit(exitCodeNoFilesTransferred)
}
}
os.Exit(exitCodeSuccess)
}


@@ -251,7 +251,14 @@ func (fsys *FS) Readdir(dirPath string,
fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
continue
}
			if usingReaddirPlus {
// We have called host.SetCapReaddirPlus() so supply the stat information
var stat fuse.Stat_t
_ = fsys.stat(node, &stat) // not capable of returning an error
fill(name, &stat, 0)
} else {
fill(name, nil, 0)
}
}
}
itemsRead = len(items)
@@ -268,25 +275,15 @@ func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
const blockSize = 4096
	total, _, free := fsys.VFS.Statfs()
	stat.Blocks = uint64(total) / blockSize // Total data blocks in file system.
	stat.Bfree = uint64(free) / blockSize   // Free blocks in file system.
	stat.Bavail = stat.Bfree                // Free blocks in file system if you're not root.
	stat.Files = 1e9                        // Total files in file system.
	stat.Ffree = 1e9                        // Free files in file system.
	stat.Bsize = blockSize                  // Block size
	stat.Namemax = 255                      // Maximum file name length?
	stat.Frsize = blockSize                 // Fragment size, smallest addressable data size in the file system.
mountlib.ClipBlocks(&stat.Blocks)
mountlib.ClipBlocks(&stat.Bfree)
mountlib.ClipBlocks(&stat.Bavail)
@@ -548,11 +545,11 @@ func translateError(err error) (errc int) {
switch errors.Cause(err) {
case vfs.OK:
return 0
	case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
return -fuse.ENOENT
	case vfs.EEXIST, fs.ErrorDirExists:
return -fuse.EEXIST
	case vfs.EPERM, fs.ErrorPermissionDenied:
return -fuse.EPERM
case vfs.ECLOSED:
return -fuse.EBADF
@@ -564,7 +561,7 @@ func translateError(err error) (errc int) {
return -fuse.EBADF
case vfs.EROFS:
return -fuse.EROFS
	case vfs.ENOSYS, fs.ErrorNotImplemented:
return -fuse.ENOSYS
case vfs.EINVAL:
return -fuse.EINVAL


@@ -26,6 +26,13 @@ import (
"github.com/rclone/rclone/vfs/vfsflags"
)
const (
// SetCapReaddirPlus informs the host that the hosted file system has the readdir-plus
// capability [Windows only]. A file system that has the readdir-plus capability can send
// full stat information during Readdir, thus avoiding extraneous Getattr calls.
usingReaddirPlus = runtime.GOOS == "windows"
)
func init() {
name := "cmount"
if runtime.GOOS == "windows" {
@@ -142,6 +149,10 @@ func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, er
// Create underlying FS
fsys := NewFS(f)
host := fuse.NewFileSystemHost(fsys)
if usingReaddirPlus {
host.SetCapReaddirPlus(true)
}
host.SetCapCaseInsensitive(f.Features().CaseInsensitive)
// Create options
options := mountOptions(f.Name()+":"+f.Root(), mountpoint)


@@ -11,9 +11,9 @@ package cmount
import (
	"testing"
	"github.com/rclone/rclone/vfs/vfstest"
)
func TestMount(t *testing.T) {
	vfstest.RunTests(t, false, mount)
}


@@ -66,8 +66,8 @@ option when copying a small number of files into a large destination
can speed transfers up greatly.
For example, if you have many files in /path/to/src but only a few of
them change every day, you can copy all the files which have changed
recently very efficiently like this:
rclone copy --max-age 24h --no-traverse /path/to/src remote:


@@ -15,12 +15,14 @@ import (
var (
autoFilename = false
stdout = false
noClobber = false
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the URL and use it for destination file path")
flags.BoolVarP(cmdFlags, &noClobber, "no-clobber", "", noClobber, "Prevent overwriting file with same name")
flags.BoolVarP(cmdFlags, &stdout, "stdout", "", stdout, "Write the output to stdout rather than a file")
}
Setting --auto-filename will cause the file name to be retrieved from
the URL (after any redirections) and used in the destination
path.
Setting --no-clobber will prevent overwriting a file on the
destination if there is one with the same name.
Setting --stdout or making the output file name "-" will cause the
output to be written to standard output.
`,
@@ -59,7 +64,7 @@ output to be written to standard output.
if stdout {
err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
} else {
				_, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, noClobber)
}
return err
})


@@ -110,7 +110,6 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
fs.Errorf(src, err.Error())
return true, false
}
fs.Debugf(src, "OK")
return false, false
}


@@ -60,7 +60,7 @@ use it like this
}
// cryptDecode returns the unencrypted file name
func cryptDecode(cipher *crypt.Cipher, args []string) error {
output := ""
for _, encryptedFileName := range args {
@@ -78,7 +78,7 @@ func cryptDecode(cipher crypt.Cipher, args []string) error {
}
// cryptEncode returns the encrypted file name
func cryptEncode(cipher *crypt.Cipher, args []string) error {
output := ""
for _, fileName := range args {


@@ -6,6 +6,7 @@ import (
"github.com/rclone/rclone/backend/dropbox"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -23,9 +24,11 @@ hashes are calculated according to [Dropbox content hash
rules](https://www.dropbox.com/developers/reference/content-hash).
The output is in the same format as md5sum and sha1sum.
`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
fs.Logf(nil, `"rclone dbhashsum" is deprecated, use "rclone hashsum %v %s" instead`, dropbox.DbHashType, args[0])
cmd.Run(false, false, command, func() error {
return operations.HashLister(context.Background(), dropbox.DbHashType, fsrc, os.Stdout)
})


@@ -4,12 +4,19 @@ import (
"context"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
var (
rmdirs = false
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &rmdirs, "rmdirs", "", rmdirs, "rmdirs removes empty directories but leaves root intact")
}
var commandDefinition = &cobra.Command{
@@ -23,6 +30,8 @@ filters so can be used to selectively delete files.
alone. If you want to delete a directory and all of its contents use
` + "`" + `rclone purge` + "`" + `
If you supply the --rmdirs flag, it will remove all empty directories along with the deleted files.
Eg delete all files bigger than 100MBytes
Check what would be deleted first (use either)
@@ -41,7 +50,14 @@ delete all files bigger than 100MBytes.
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(true, false, command, func() error {
			if err := operations.Delete(context.Background(), fsrc); err != nil {
return err
}
if rmdirs {
fdst := cmd.NewFsDir(args)
return operations.Rmdirs(context.Background(), fdst, "", true)
}
return nil
})
},
}


@@ -0,0 +1,44 @@
package genautocomplete
import (
"log"
"github.com/rclone/rclone/cmd"
"github.com/spf13/cobra"
)
func init() {
completionDefinition.AddCommand(fishCommandDefinition)
}
var fishCommandDefinition = &cobra.Command{
Use: "fish [output_file]",
Short: `Output fish completion script for rclone.`,
Long: `
Generates a fish autocompletion script for rclone.
This writes to /etc/fish/completions/rclone.fish by default so will
probably need to be run with sudo or as root, eg
sudo rclone genautocomplete fish
Logout and login again to use the autocompletion scripts, or source
them directly
. /etc/fish/completions/rclone.fish
If you supply a command line argument the script will be written
there.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/etc/fish/completions/rclone.fish"
if len(args) > 0 {
out = args[0]
}
err := cmd.Root.GenFishCompletionFile(out, true)
if err != nil {
log.Fatal(err)
}
},
}


@@ -33,3 +33,16 @@ func TestCompletionZsh(t *testing.T) {
assert.NoError(t, err)
assert.NotEmpty(t, string(bs))
}
func TestCompletionFish(t *testing.T) {
tempFile, err := ioutil.TempFile("", "completion_fish")
assert.NoError(t, err)
defer func() { _ = tempFile.Close() }()
defer func() { _ = os.Remove(tempFile.Name()) }()
fishCommandDefinition.Run(fishCommandDefinition, []string{tempFile.Name()})
bs, err := ioutil.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(bs))
}
