mirror of https://github.com/rclone/rclone.git synced 2025-12-21 18:53:34 +00:00

Compare commits


342 Commits

Author SHA1 Message Date
Nick Craig-Wood
a80287effd crypt: fix tests after introduction of no data encryption 2021-11-15 18:03:13 +00:00
Nick Craig-Wood
4216d55a05 test: make sure we run wrapping backend tests in "make quicktest" 2021-11-15 18:03:13 +00:00
Nick Craig-Wood
ba6730720d Fix repeated error messages after pkg/errors removal 2021-11-15 17:58:40 +00:00
Nick Craig-Wood
7735b5c694 Add Sinan Tan to contributors 2021-11-15 17:58:40 +00:00
Nick Craig-Wood
d45b3479ee Add Andy Jackson to contributors 2021-11-15 17:58:40 +00:00
Nick Craig-Wood
4c5df0a765 Add Fredric Arklid to contributors 2021-11-15 17:58:40 +00:00
Sinan Tan
8c61a09be2 crypt: add test cases and documentation for base64 and base32768 filename encoding #5801 2021-11-15 17:57:02 +00:00
Max Sum
c217145cae crypt: add base64 and base32768 filename encoding options #5801 2021-11-15 17:57:02 +00:00
thomae
4c93378f0e serve sftp: update docs on --stdio 2021-11-12 10:49:35 +00:00
thomae
f9e54f96c3 docs/sftp: fix typo 2021-11-11 19:20:15 +01:00
Andy Jackson
af0fcd03cb hdfs: add file and directory move/rename support 2021-11-11 16:41:43 +00:00
albertony
00aafc957e sftp: add rclone to list of supported md5sum/sha1sum commands to look for
See #5781
2021-11-11 15:16:45 +01:00
albertony
29abbd2032 hashsum: support creating hash from data received on stdin
See #5781
2021-11-11 15:16:45 +01:00
Fredric Arklid
663b2d9c46 jottacloud: Add support for Tele2 Cloud 2021-11-11 12:32:23 +00:00
Nick Craig-Wood
f36d6d01b5 rc: fix operations/publiclink default for expires parameter
Before this change the expires parameter was defaulting to 0 if not
provided.

This change makes it default to fs.DurationOff which is the same as
the `rclone link` command.

See: https://forum.rclone.org/t/operations-publiclink-from-dropbox-error-not-autorized/27374
2021-11-11 11:14:22 +00:00
Nick Craig-Wood
0c03aa3a8b dropbox: speed up directory listings by specifying 1000 items in a chunk 2021-11-11 11:14:22 +00:00
Nick Craig-Wood
caa2b8bf40 dropbox: save an API request when at the root
Before this change, rclone always emitted an API request to discover
what type of thing the root is.

This is unnecessary as it is always a directory.
2021-11-11 11:14:22 +00:00
Nick Craig-Wood
421e840e37 Add Borna Butkovic to contributors 2021-11-11 11:14:22 +00:00
Nick Craig-Wood
9b57d27be4 Add David to contributors 2021-11-11 11:14:22 +00:00
Borna Butkovic
627ac1b2d9 ftp: add --ftp-ask-password to prompt for password when needed 2021-11-10 17:34:14 +00:00
David
ae395d8cf0 rc: error on web GUI update won't be fatal - fixes #5385 2021-11-10 17:05:13 +00:00
Ankur Gupta
f04520a6e3 operations: fix goroutine leak in case of copy retry
Whenever transfer.Account() is called, a new goroutine acc.averageLoop()
is started. This goroutine exits only when the channel acc.exit is closed.
acc.exit is closed when acc.Done() is called, which happens during tr.Done().

However, if tr.Reset is called during a copy low level retry, it replaces
the tr.acc, without calling acc.Done(), which results in the goroutine
mentioned above never exiting.

This commit calls acc.Done() during tr.Reset().
2021-11-10 16:44:29 +00:00
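A minimal sketch of the leak pattern and the fix described above (the names mirror the commit text but are simplified stand-ins, not rclone's actual types):

```go
package main

import "time"

type account struct {
	exit chan struct{}
}

func newAccount() *account {
	acc := &account{exit: make(chan struct{})}
	go func() { // averageLoop analogue: exits only when acc.exit closes
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-acc.exit:
				return
			case <-ticker.C:
				// update the moving average...
			}
		}
	}()
	return acc
}

// Done closes the exit channel so the goroutine above can finish.
func (a *account) Done() { close(a.exit) }

type transfer struct{ acc *account }

// Reset replaces the account for a low level retry. Without the Done
// call the old averageLoop goroutine would never exit - one leaked
// goroutine per retry.
func (t *transfer) Reset() {
	if t.acc != nil {
		t.acc.Done() // the fix described above
	}
	t.acc = newAccount()
}

func main() {
	t := &transfer{}
	t.Reset()
	t.Reset() // without Done the first goroutine would leak here
	t.acc.Done()
}
```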
Nick Craig-Wood
c968c3e41c build: raise minimum go version to go1.15
This was necessary because go1.14 seems to have a modules related bug
which means it tries to build modules even though the uses of them are
all disabled with build constraints. This seems to be fixed in go1.15.
2021-11-10 16:11:12 +00:00
Nick Craig-Wood
3661791e82 serve restic: disable for go1.16 and earlier after update 2021-11-10 15:42:50 +00:00
Nick Craig-Wood
4198763c35 build: update all dependencies 2021-11-10 10:34:56 +00:00
Nick Craig-Wood
3de47b8ed4 build: upgrade go.mod file to go1.17 2021-11-10 10:34:56 +00:00
Nick Craig-Wood
71b8e1e80b build: more docs on upgrading dependencies 2021-11-10 10:34:56 +00:00
Nick Craig-Wood
7366e97dfc mega: fix error handling broken by removal of github.com/pkg/errors
There were instances of errors.Wrap being called with a nil error
which the conversion didn't deal with correctly.
2021-11-09 13:43:45 +00:00
Nick Craig-Wood
21ba4d9a18 onedrive: fix error handling broken by removal of github.com/pkg/errors
There were instances of errors.Wrap being called with a nil error
which the conversion didn't deal with correctly.
2021-11-09 13:43:45 +00:00
Nick Craig-Wood
96e099d8e7 union: fix error handling broken by removal of github.com/pkg/errors
There were instances of errors.Wrap being called with a nil error
which the conversion didn't deal with correctly.
2021-11-09 13:43:45 +00:00
Nick Craig-Wood
2a31b5bdd6 Add bbabich to contributors 2021-11-09 13:43:45 +00:00
Nick Craig-Wood
9bdfe4c36f Add Vitor Arruda to contributors 2021-11-09 13:43:45 +00:00
Nick Craig-Wood
e3a2f539fe Add Chris Lu to contributors 2021-11-09 13:43:45 +00:00
Nick Craig-Wood
ffa943e31f Add Carlo Mion to contributors 2021-11-09 13:43:45 +00:00
bbabich
b16f603c51 s3: Add RackCorp object storage to providers 2021-11-09 11:46:58 +00:00
database64128
a7a8372976 🧪 fstest: fix time tests on Windows and add convenience methods to check local and remote fs with precision
Previously only the fs being checked was passed to
GetModifyWindow(). However, in most tests, the test files are
generated in the local fs and transferred to the remote fs. So the
local fs time precision has to be taken into account.

This meant that on Windows the time tests failed because the
local fs has a time precision of 100ns. Checking remote items uploaded
from local fs on Windows also requires a modify window of 100ns.
2021-11-09 11:43:36 +00:00
Vitor Arruda
9beb0677e4 backend: Fix union eplus policy returned nil 2021-11-08 11:55:27 +00:00
Nick Craig-Wood
e43b5ce5e5 Remove github.com/pkg/errors and replace with std library version
This is possible now that we no longer support go1.12 and brings
rclone into line with standard practices in the Go world.

This also removes errors.New and errors.Errorf from lib/errors and
prefers the stdlib errors package over lib/errors.
2021-11-07 11:53:30 +00:00
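A sketch of what the conversion looks like in practice, including the nil-error pitfall that the mega/onedrive/union fixes above address (names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("object not found")

// Before: return errors.Wrap(err, "lookup failed")
// After: fmt.Errorf with the %w verb wraps using only the stdlib.
func lookup() error {
	return fmt.Errorf("lookup failed: %w", errNotFound)
}

// Pitfall behind the "nil error" fixes above: errors.Wrap(nil, "msg")
// returned nil, but fmt.Errorf("msg: %w", err) always returns a
// non-nil error, so nil now has to be checked explicitly.
func wrap(err error, msg string) error {
	if err == nil {
		return nil
	}
	return fmt.Errorf("%s: %w", msg, err)
}

func main() {
	err := lookup()
	fmt.Println(errors.Is(err, errNotFound)) // true - %w preserves the chain
	fmt.Println(wrap(nil, "harmless"))       // <nil>
}
```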
Chris Lu
97328e5755 Improve description for SeaweedFS 2021-11-06 21:01:50 +03:00
Carlo Mion
7b7d780fff stats: fix missing StatsInfo fields in the computation of the group sum 2021-11-05 15:33:00 +00:00
Carlo Mion
c2600f9e4d stats: fix missing computation of transferQueueSize when summing up statistics group - fixes #5749 2021-11-05 15:33:00 +00:00
Ivan Andreev
7bd853ce35 Add Roberto Ricci to contributors 2021-11-05 18:29:47 +03:00
Roberto Ricci
05150cfb1d backend/ftp: increase testUploadTimeout.maxTime to 10 seconds
On slow machines (e.g. Github CI), especially if GOARCH=386,
the test for cmd/serve/ftp could fail if this value is too small.

Fixes #5783
2021-11-05 18:27:44 +03:00
albertony
25366268fe Add Atílio Antônio to contributors 2021-11-04 12:55:49 +01:00
Atílio Antônio
c08d48a50d docs: improve grammar and fix typos (#5361)
This alters some comments in source files, but is mainly concerned with documentation files and help messages.
2021-11-04 12:50:43 +01:00
Nick Craig-Wood
454574e2cc s3: collect the provider quirks into a single function and update
This removes the checks against the provider throughout the code and
puts them into a single setQuirks function for easy maintenance when
adding a new provider.

It also updates the quirks with the results of testing against
backends we have access to.

This also adds a list_url_encode parameter so that the quirk can be
manually set.
2021-11-03 21:44:09 +00:00
Nick Craig-Wood
9218a3eb00 fs: add a tristate true/false/unset configuration value 2021-11-03 21:44:09 +00:00
Nick Craig-Wood
1e4ef4b4d5 Add Felix Bünemann to contributors 2021-11-03 21:44:09 +00:00
Nick Craig-Wood
8d92f7d697 s3: fallback to ListObject v1 on unsupported providers
This implements a quirks system for providers and notes which
providers we have tested to support ListObjectsV2.

For those providers which don't support ListObjectsV2 we use the
original ListObjects call.
2021-11-03 19:13:50 +00:00
Felix Bünemann
fd56abc5f2 s3: Use ListObjectsV2 for faster listings
Using ListObjectsV2 with a continuation token is about 5-6x faster than
ListObjectsV2 with a marker.
2021-11-03 19:13:50 +00:00
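A sketch of continuation-token paging with aws-sdk-go (the bucket name is illustrative; rclone's real listing code handles far more cases):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	input := &s3.ListObjectsV2Input{
		Bucket:  aws.String("example-bucket"), // hypothetical bucket
		MaxKeys: aws.Int64(1000),
	}
	for {
		page, err := svc.ListObjectsV2(input)
		if err != nil {
			// a provider without V2 support errors here - the quirk
			// system above falls back to the original ListObjects
			log.Fatal(err)
		}
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key))
		}
		if !aws.BoolValue(page.IsTruncated) {
			break
		}
		// resume where the last page left off
		input.ContinuationToken = page.NextContinuationToken
	}
}
```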
Ivan Andreev
b323bf34e2 sync/test: skip test ConcurrentTruncate on uptobox (take 2)
The test is not applicable to uptobox which can't upload empty files.
The test was not skipped as intended because the error was compared directly.
This fix compares the error Cause instead, because Sync wraps the error.
2021-11-02 19:24:23 +03:00
Ivan Andreev
e78e73eae7 lib/encoder: fix benchmarks
At some point the Slash encode option was added to the Onedrive
encoder, so it began to encode slashes in file names rather than
treating them as path separators.
This patch adapts benchmark test cases accordingly.

Fixes #5659
2021-11-02 19:23:16 +03:00
Nick Craig-Wood
f51a5eca2e fstests: add encoding test for URL encoded path name #5768
Add an encoding test to make sure backends can deal with a URL encoded
path name. This is a fairly common failing in backends and has been an
intermittent problem with onedrive itself.
2021-11-02 15:59:36 +00:00
albertony
39e2af7974 config: allow dot in remote names (#5606) 2021-11-01 20:50:06 +01:00
Ivan Andreev
b3217adf08 Add Chris Nelson to contributors 2021-11-01 21:24:06 +03:00
Ivan Andreev
074234119a bisync: documentation #5164
Co-authored-by: Chris Nelson <stuff@cjnaz.com>
2021-11-01 21:00:27 +03:00
Ivan Andreev
6210e22ab5 bisync: implementation #5164
Fixes #118

Co-authored-by: Chris Nelson <stuff@cjnaz.com>
2021-11-01 21:00:27 +03:00
Ivan Andreev
940e99a929 bisync: test scenarios #5164
Co-authored-by: Chris Nelson <stuff@cjnaz.com>
2021-11-01 21:00:27 +03:00
Ivan Andreev
79b6866b57 rc: export NewErrParamInvalid #5164 2021-11-01 21:00:27 +03:00
Ivan Andreev
c142e3edcc filter: export GlobToRegexp #5164 2021-11-01 21:00:27 +03:00
Nick Craig-Wood
5c646dff9a Start v1.58.0-DEV development 2021-11-01 16:54:17 +00:00
Nick Craig-Wood
19dfaf7440 docs: fix shortcode rendering on download page 2021-11-01 16:50:52 +00:00
Nick Craig-Wood
169990e270 Version v1.57.0 2021-11-01 15:45:40 +00:00
Nick Craig-Wood
e781bcbba1 Add David Liu to contributors 2021-11-01 15:45:40 +00:00
Nick Craig-Wood
409916b0c5 Add trevyn to contributors 2021-11-01 15:45:40 +00:00
albertony
d9c5be32e7 Add thomae to contributors 2021-11-01 15:10:14 +01:00
thomae
57074be9b3 serve sftp: fix typo 2021-11-01 15:07:13 +01:00
Joda Stößer
bae5c7c81b docs: remove application type "other" from drive.md
The application type "other" is not an option anymore.
2021-11-01 13:15:31 +00:00
albertony
05321f4aef docs/sftp: more detailed explanation of pubkey file and certificate 2021-11-01 13:14:17 +00:00
albertony
c9d7248d85 docs/jottacloud: update description of whitelabel services 2021-11-01 12:57:51 +00:00
albertony
da8f9be84b docs: describe the --human-readable option in more detail 2021-11-01 12:55:52 +00:00
David Liu
b806166147 docs: swift: Update OCI url
Oracle cloud storage is now also rebranded as OCI bucket, with a new entry point
2021-11-01 12:54:23 +00:00
acsfer
20f936c9d4 Add note about S3 compatible services 2021-11-01 12:47:18 +00:00
albertony
91cdaffcc1 docs: add faq section explaining why rclone changes fullwidth characters in file names 2021-11-01 12:46:23 +00:00
trevyn
33bf9b4923 Add mention of Rust bindings for librclone 2021-11-01 12:43:31 +00:00
albertony
b4944f4520 docs/librclone: document that strings are utf8 encoded 2021-11-01 12:39:00 +00:00
albertony
286b152e7b librclone: free strings in python example 2021-11-01 12:36:52 +00:00
Nick Craig-Wood
f7764a0c9d premiumizeme: fix server side directory move after API changes
Apparently moving a directory using the id "0" as the root no longer
works, so this reads the real root ID when it is listed and uses that.

This fixes the DirMove problem.

See: https://forum.rclone.org/t/premiumize-cant-move-files/27169
See: #5734
2021-10-31 19:18:55 +00:00
Nick Craig-Wood
07fcba888c dircache: add SetRootIDAlias to update RootID from FindLeaf 2021-10-31 19:18:55 +00:00
Nick Craig-Wood
af705c754c premiumizeme: fix server side move after API change
See: https://forum.rclone.org/t/premiumize-cant-move-files/27169
See: #5734
2021-10-31 19:18:55 +00:00
Nick Craig-Wood
f85e3209b3 premiumizeme: fix directory listing after API changes
The API doesn't seem to accept a value of "0" any more for the root
directory ID, giving the error "Could not decode folder id".

However omitting it seems to work fine.
2021-10-31 19:18:55 +00:00
Nick Craig-Wood
e77dcb7f52 test_all: remove stray debug 2021-10-31 19:18:55 +00:00
Nick Craig-Wood
4ab842198a Revert "premiumizeme: attempt to fix integration tests"
This reverts commit 1eff0eef7a.

Now that the test account is premium again it is very fast and this is
no longer needed.
2021-10-31 19:18:54 +00:00
albertony
a8059b8a90 docs/mount: add note that to execute programs one must set custom filesystem permissions (#5771) 2021-10-31 00:48:56 +02:00
Nick Craig-Wood
cf2c2792e6 s3: fix corrupted on transfer: sizes differ 0 vs xxxx with Ceph
In this commit, released in 1.56.0, we started reading the size of the
object from the Content-Length header as returned by the GET request
to read the object.

4401d180aa s3: add --s3-no-head-object

However some object storage systems, notably Ceph, don't return a
Content-Length header.

The new code correctly calls the setMetaData function with a nil
pointer to the ContentLength.

However due to this commit from 2014, released in v1.18, the
setMetaData function was not ignoring the size as it should have done.

0da6f24221  s3: use official github.com/aws/aws-sdk-go including multipart upload #101

This commit correctly ignores the content length if not set.

Fixes #5732
2021-10-30 12:01:09 +01:00
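The guard described above, reduced to a sketch (setMetaData and the Object type are simplified stand-ins for rclone's s3 backend code):

```go
package main

import "fmt"

type Object struct {
	size int64 // size learned from the bucket listing
}

// setMetaData keeps the previously known size when the server (e.g.
// Ceph) returns no Content-Length header, i.e. the pointer is nil.
func setMetaData(o *Object, contentLength *int64) {
	if contentLength != nil {
		o.size = *contentLength
	}
}

func main() {
	o := &Object{size: 4096}
	setMetaData(o, nil) // Ceph-style response: size stays 4096
	fmt.Println(o.size)
}
```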
Nick Craig-Wood
e6e1c49b58 s3: fix shared_credentials_file auth after reverting incorrect fix #5762
Before this change the `shared_credentials_file` config option was
being ignored.

The correct value is passed into the SDK but it only sets the
credentials in the default provider. Unfortunately we wipe the default
provider in order to install our own chain if env_auth is true.

This patch restores the shared credentials file in the session
options, exactly the same as how we restore the profile.

Original fix:

1605f9e14d s3: Fix shared_credentials_file auth
2021-10-30 11:54:17 +01:00
Nick Craig-Wood
712f9c9760 s3: fix IAM Role for Service Account not working and other auth problems
This patch reverts this commit

1605f9e14d s3: Fix shared_credentials_file auth

It unfortunately had the side effect of making the s3 SDK ignore the
config in our custom chain and use the default provider. This meant
that advanced auth, such as --s3-profile with role_arn, was being
ignored.

Fixes #5468
Fixes #5762
2021-10-30 11:54:17 +01:00
albertony
a238877ad8 docs: note that destination is always a directory (#5766) 2021-10-30 00:30:00 +02:00
Ivan Andreev
70297c3aed sync/test: TestConcurrentTruncate needs empty files - skip on uptobox 2021-10-28 17:04:56 +03:00
Nolan Woods
a074a2b983 lib/http: Fix handling of ssl credentials
Adds a test that makes an actual http and https request against the server
2021-10-27 14:46:10 +03:00
Nick Craig-Wood
00ceeef21c hdfs: wait longer for the server to start up in the integration tests #5734
This needs fixing properly so rclone knows when the server has started
properly.
2021-10-23 22:53:17 +01:00
Nick Craig-Wood
2e81b78486 Add Dmitry Bogatov to contributors 2021-10-23 22:53:17 +01:00
Dmitry Bogatov
bb11803f1f Create direct share link for "koofr" backend
Instead of creating link to web interface, create direct link usable by
curl(1) or wget(1).
2021-10-23 15:00:33 +03:00
Nick Craig-Wood
a542ddf60a hdfs: attempt to make integration tests more reliable #5734
This makes sure the namenode is accepting TCP connections before
starting the integration tests in an attempt to make them more
reliable.
2021-10-22 13:07:48 +01:00
Nick Craig-Wood
257f5d279a filefabric: fix directory move after API change #5734
The directory move call's JSON response has changed from returning
the TaskID as a string to returning it as an integer. In other
places it is still returned as a string though.

This patch allows the TaskID to be an integer or a string in the JSON
response and keeps it internally as a string like before.
2021-10-22 12:58:00 +01:00
albertony
4f05ece39e test: fix touchdir test on backends without modtime support 2021-10-22 13:37:34 +02:00
albertony
9c8c0a58b5 touch: fix recursive touch due to recently introduced error ErrorIsDir 2021-10-22 13:37:34 +02:00
albertony
a70c20fe6b touch: improve error message from recursive touch 2021-10-22 13:37:34 +02:00
Ivan Andreev
59e77f794e serve/docker: skip race test until we find a solution for deadlock
Related to #5738
2021-10-22 14:00:48 +03:00
Ivan Andreev
1a66736ef0 Add Thomas Stachl to contributors 2021-10-21 15:23:48 +03:00
Ivan Andreev
844025d053 ftp: add support for precise time #5655 2021-10-21 14:50:53 +03:00
albertony
3a03f2778c test: ignore integration test TestCopyFileMaxTransfer on Google Drive
The test fails because it expects that a copy with MaxTransfer and CutoffModeHard
returns a fatal error, as this is thrown from accounting (ErrorMaxTransferLimitReachedFatal),
but in the case of Google Drive the external Google API catches it and replaces it with a
non-fatal error:

pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))

(7290f25351/internal/gensupport/media.go (L140))
2021-10-21 12:42:25 +01:00
Ivan Andreev
29c6c86c00 ftp: fix timeout after long uploads #5596 2021-10-21 14:18:23 +03:00
Ivan Andreev
a32fde09ca fs/http: declutter code #5596 2021-10-21 14:18:23 +03:00
Ivan Andreev
1d50336615 ftp: replace jlaffaye/ftp by rclone/ftp in the build #5596 2021-10-21 14:18:23 +03:00
Thomas Stachl
015b250905 serve/docker: build docker plugin for multiple platforms #5668
Fixes #5462

Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
2021-10-21 13:01:23 +03:00
Nick Craig-Wood
4b1ada2d51 filefabric: allow integration tests double time as they keep timing out #5734 2021-10-21 09:54:29 +01:00
albertony
f589dbc077 docs: don't show fictional example values for options as multiple choice items
See #5538
2021-10-20 22:56:19 +02:00
albertony
8efead1ee6 docs: update guide for contributing documentation
See #5538
2021-10-20 22:56:19 +02:00
albertony
9a17b32b5d docs: automatically remove ending punctuation from first line of backend option help string when used for flag usage
See #5538
2021-10-20 22:56:19 +02:00
albertony
8b65c55711 cmd/config: improve option prompt
See #5538
2021-10-20 22:56:19 +02:00
albertony
e2f47ecdeb docs: punctuation cleanup
See #5538
2021-10-20 22:56:19 +02:00
albertony
b868561951 jottacloud: return direct download link from rclone link command
If the shared path is a directory, then the download will be a zip archive.

Fixes #5391

See #5399
2021-10-20 19:54:29 +02:00
albertony
78db3dba0e jottacloud: add support for UserInfo feature
See #5399
2021-10-20 19:54:29 +02:00
albertony
41876dd669 touch: add support for touching files in directory, with options for recursive, filtering and dry-run/interactive
Fixes #5301
2021-10-20 19:24:57 +02:00
Nick Craig-Wood
2e72ec96c1 qingstor: ignore harmless failing integration test #5734
The test TestIntegration/FsMkdir/FsPutFiles/FromRoot/ListR fails in
the integration test because there is a broken bucket in the test
account which support haven't been able to remove.
2021-10-20 17:51:06 +01:00
Nick Craig-Wood
9742648fce fichier: allow more list retries for the integration tests #5734 2021-10-20 17:45:54 +01:00
Nick Craig-Wood
d73264572b putio: allow integration tests double time as they keep timing out #5734 2021-10-20 17:38:30 +01:00
Nick Craig-Wood
ff801e8e17 test_all: allow configuring a multiplier for the timeout #5734 2021-10-20 17:38:30 +01:00
Nick Craig-Wood
72c013c6f4 vfs: increase time to wait for writers in tests to 30s
In some backends (eg putio) this deadline was consistently missed at
10s so this patch increases it to 30s.

See: #5734
2021-10-20 17:38:30 +01:00
Nick Craig-Wood
1eff0eef7a premiumizeme: attempt to fix integration tests
This tries to fix the integration tests by only allowing one
premiumizeme test to run at once, in the hope it will stop rclone
hitting the rate limits and breaking the tests.

See: #5734
2021-10-20 17:38:30 +01:00
Nick Craig-Wood
5a5318720a onedrive: stop public link test complaining on non-business account
The TestIntegration/FsMkdir/FsPutFiles/PublicLink test doesn't work on
a standard onedrive account, it returns

    accessDenied: accountUpgradeRequired: Account Upgrade is required for this operation.

See: #5734
2021-10-20 17:38:30 +01:00
Nick Craig-Wood
e253b44882 dropbox: stop public link test complaining on non-enterprise account
The TestIntegration/FsMkdir/FsPutFiles/PublicLink test doesn't work on
a standard dropbox account, only on an enterprise account because it
sets expiry dates.

See: #5734
2021-10-20 17:38:30 +01:00
Ivan Andreev
0d7426a2dd hasher: backend documentation #5587 2021-10-20 19:11:54 +03:00
Ivan Andreev
f102ef2161 hasher: add hasher backend #5587 2021-10-20 19:11:54 +03:00
Ivan Andreev
57c7fde864 lib/kv: add unit tests, fix races #5587
After testing concurrent calls of `kv.Start` and `db.Stop` I had to put more
parts of these under a mutex to make results deterministic without sleeps
in the test body. It's safer but can potentially lock Start for up to
2 seconds due to `db.open`.
2021-10-20 19:11:54 +03:00
Ivan Andreev
50df8cec9c lib/kv: add key-value database api #5587
Add bolt-based key-value database support.

Quick API description:
https://github.com/rclone/rclone/pull/5587#issuecomment-942174768
2021-10-20 19:11:54 +03:00
Nick Craig-Wood
8cd3251b57 fstests: Relax MimeType support checking #5587
Before this change we checked that features.ReadMimeType was set if
and only if the Object.MimeType method was implemented.

However this test is overly general - we don't care if Objects
advertise MimeType when features.ReadMimeType is set provided that
they always return an empty string (which is what a wrapping backend
might do).

This patch implements that logic.
2021-10-20 19:11:54 +03:00
Nick Craig-Wood
cc2f6f722c filefabric: make backoff exponential for error_background to fix errors
Before this change the backoff for the error_background error was 6
seconds. This means that if it wasn't resolved in 60 seconds (with the
default 10 low level retries) then an error was reported.

This error was being reported frequently in the integration tests, so
is likely affecting real users too.

This patch changes the backoff into an exponential backoff
1,2,4,8...1024 seconds to make sure we wait long enough for the
background operation to complete.

See #5734
2021-10-20 15:41:09 +01:00
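A sketch of the new retry schedule (the doubling and the cap follow the commit text; the surrounding retry loop is illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// backoff returns the wait before retry n: 1,2,4,8,...,1024 seconds.
func backoff(n int) time.Duration {
	d := time.Second << uint(n)
	if d > 1024*time.Second {
		d = 1024 * time.Second
	}
	return d
}

func main() {
	for n := 0; n < 11; n++ {
		fmt.Println(backoff(n)) // 1s 2s 4s ... 17m4s
		// in real code: time.Sleep(backoff(n)), then retry while the
		// server still reports error_background
	}
}
```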
Ivan Andreev
6cda4c2c3c Add Ian Levesque to contributors 2021-10-19 23:02:51 +03:00
Ivan Andreev
023b666863 Add Filip Rysavy to contributors 2021-10-19 23:01:47 +03:00
Ivan Andreev
2a4c6ad0e7 Add Matthew Sevey to contributors 2021-10-19 23:00:16 +03:00
Ivan Andreev
6d02530f9d sia: finish documentation #4514
Also rename stuttering `--sia-sia-user-agent` to `--sia-user-agent`
2021-10-19 22:55:27 +03:00
Ivan Andreev
c5bc857f9b sia: fix and enable integration tests #4514
- set up correct path encoding (fixes backend test FsEncoding)
- ignore range option if file is empty (fixes VFS test TestFileReadAtZeroLength)
- clean up stray files left after failed upload (fixes test FsPutError)
- rebase code on master, adapt backend for rclone context passing
- translate Siad errors to rclone native FS errors in sia errorHandler
- TestSia: return proper backend options from the script
- TestSia: use up-to-date AntFarm image, nebulouslabs/siaantfarm is stale
2021-10-19 22:55:27 +03:00
Matthew Sevey
0d1e017e09 sia: setup docker with sia-antfarm for test #4514
Always pull the latest Sia Antfarm docker image
Add wait for Sia renter to become upload ready

Co-authored-by: Filip Rysavy <fil@siasky.net>
2021-10-19 22:55:27 +03:00
Ian Levesque
3351b1e6ae sia: add backend for sia decentralized cloud #4514 2021-10-19 22:55:27 +03:00
Fred
b085aa1a3f seafile: fix error when not configured for 2fa (#5665) 2021-10-19 20:53:35 +01:00
Nick Craig-Wood
eb0c8284f1 azureblob: fix incorrect size after --azureblob-no-head-object patch
In

05f128868f azureblob: add --azureblob-no-head-object

we incorrectly parsed the size of the object from the Content-Length
header of the response. This is incorrect in the presence of Range
requests.

This fixes the problem by parsing the Content-Range header, if
available, to read the correct length when a Range request was
issued.

See: #5734
2021-10-19 20:12:17 +01:00
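A sketch of reading the full size from Content-Range (real code also has to handle an unknown total, sent as `*`):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// sizeFromContentRange extracts the total object size from a header
// like "bytes 0-1023/4096"; with a Range request, Content-Length only
// covers the returned slice, so this total is the size to trust.
func sizeFromContentRange(v string) (int64, error) {
	slash := strings.LastIndex(v, "/")
	if slash < 0 {
		return 0, fmt.Errorf("no total in Content-Range %q", v)
	}
	return strconv.ParseInt(v[slash+1:], 10, 64)
}

func main() {
	size, err := sizeFromContentRange("bytes 0-1023/4096")
	fmt.Println(size, err) // 4096 <nil>
}
```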
Nick Craig-Wood
f5c7c597ba s3: Use a combination of SDK retries and rclone retries - fixes #5509
This reverts commit

dc06973796 Revert "s3: use rclone's low level retries instead of AWS SDK to fix listing retries"

Which in turn reverted

5470d34740 "backend/s3: use low-level-retries as the number of SDK retries"

So we are back where we started.

It then modifies it to set the AWS SDK to `--low-level-retries`
retries, but sets the rclone retries to 2 so that directory listings
can be retried.
2021-10-19 20:12:17 +01:00
Nick Craig-Wood
3cef84aabe Add r0kk3rz to contributors 2021-10-19 20:12:17 +01:00
Nick Craig-Wood
93afd5c346 Add Rajat Goel to contributors 2021-10-19 20:12:17 +01:00
Alex Chen
1c3c8babd3 docs: mention make for building and cmount tag for macos (#5487) 2021-10-19 12:18:06 +08:00
Ivan Andreev
690a7ac783 chunker: fix md5all test for no-meta test remotes 2021-10-18 18:04:07 +03:00
Ivan Andreev
bbcc9a45fe serve/docker: allow to customize proxy settings of docker plugin 2021-10-18 18:03:06 +03:00
albertony
16949fde09 Do not override mime types from os defaults
https://forum.rclone.org/t/rclone-serve-http-save-as/26672
2021-10-18 13:28:22 +01:00
r0kk3rz
8e4b87ae03 s3: Add AWS Snowball Edge to providers examples - fixes #5720 2021-10-18 12:52:59 +01:00
Rajat Goel
db6002952e dropbox: upgrade sdk version 2021-10-16 10:55:02 +01:00
Nick Craig-Wood
96e14bf456 sftp: fix initialization bug introduced by fs.ErrorIsDir return
3fbaa4c0b0 backends: make NewObject return fs.ErrorIsDir if possible
2021-10-16 10:38:24 +01:00
Nick Craig-Wood
54da6154c4 operations: fix lsjson --stat on root directories of bucket based backends 2021-10-16 10:38:24 +01:00
Ivan Andreev
f50537b64b ftp: add option to disable tls13 2021-10-15 20:24:37 +03:00
Ivan Andreev
f37b25a2df ftp: enable tls session cache by default 2021-10-15 19:54:25 +03:00
albertony
29b8c71522 build: force utf8 when updating backend docs from python script (#5721) 2021-10-15 18:51:57 +02:00
Nick Craig-Wood
7b66ca132d build: increase timeout for golangci-lint to 10 minutes 2021-10-15 15:58:52 +01:00
Nick Craig-Wood
9ce0df3242 dropbox: add --dropbox-batch-commit-timeout to control batch timeout
This also adds a Debug level log showing how long each batch took

See: #5491
2021-10-15 15:32:40 +01:00
Nick Craig-Wood
f4c5f1f185 box: retry operation_blocked_temporary errors #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
825f7826f5 box: add --box-owned-by to only show items owned by the login passed #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
34140b2f57 box: delete items in parallel in cleanup using --checkers threads #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
e18ae5da09 box: factor directory listing and cleanup listing into one function #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
b61912b4c8 box: add --box-list-chunk to control listing chunk size #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
bfecf5301b box: when doing cleanup delete as much as possible - fixes #5545
Before this change the cleanup routine exited on the first deletion
error.

This change counts any errors on deletion and exits when the iteration
is complete with an error showing the number of deletion failures.
Deletion failures will be logged.
2021-10-15 15:28:54 +01:00
Nick Craig-Wood
308323e9c4 box: make listings of heavily used directories more reliable #5545
Before this change we used limit/offset paging for directories in the
main directory listing routine and in the trash cleanup listing.

This switches to the new scheme of limit/marker which is more reliable
on a directory which is continuously changing. It has the disadvantage
that it doesn't tell us the total number of items available; however,
that isn't information rclone uses.
2021-10-15 15:28:54 +01:00
Nick Craig-Wood
fc5d6c16b6 serve ftp: ensure modtime is passed as UTC always to fix timezone oddities
See: https://forum.rclone.org/t/ftp-server-reports-file-timestamps-in-utc/26274
2021-10-15 15:25:51 +01:00
Nick Craig-Wood
c821fbeddf drive: add -o config option to backend drives to config for all shared drives
See: https://forum.rclone.org/t/bulk-create-remotes-to-existing-google-shared-drives/26837/
2021-10-15 15:22:14 +01:00
Nick Craig-Wood
93d85015af sftp: fix timeout when doing MD5SUM of large file
Before this change we were timing out MD5SUMs after 1 minute because
rclone was closing the SSH session when there were sessions still
active.

This change counts sessions active for all SSH sessions now (Upload,
Download, Hashes and running commands).

See: https://forum.rclone.org/t/while-rclone-copying-large-files-md5sum-failed-with-exit-status/26845/
2021-10-15 15:19:22 +01:00
Nick Craig-Wood
a98e3ea6f1 build: replace the deprecated golint linter with revive
This fixes up a small number of new lint items also
2021-10-15 12:51:31 +01:00
Nick Craig-Wood
167406bc68 build: switch to using the golangci-lint action for better error reporting
The action reports errors to users in their pull requests which is
much easier to understand.
2021-10-15 12:50:22 +01:00
Nick Craig-Wood
036abde393 build: fix indentation in build.yml 2021-10-15 12:50:22 +01:00
Nick Craig-Wood
edf8978d15 operations: fix HashSum tests after removing ERROR and UNSUPPORTED
This was caused by

7a1cab57b6 cmd/hashsum: dont put ERROR or UNSUPPORTED in output

And was picked up in the integration tests.

This patch no longer calls the HashLister for unsupported hash types.
2021-10-15 10:51:08 +01:00
Nick Craig-Wood
f529c02446 lsjson: add --stat flag and operations/stat api
This enables information about single files to be efficiently
retrieved.
2021-10-14 17:15:50 +01:00
Nick Craig-Wood
3fbaa4c0b0 backends: make NewObject return fs.ErrorIsDir if possible
This changes the interface to NewObject so that if NewObject is called
on a directory then it should return fs.ErrorIsDir if possible without
doing any extra work, otherwise fs.ErrorObjectNotFound.

Tested on integration test server with:

go run integration-test.go -tests backend -run TestIntegration/FsMkdir/FsPutFiles/FsNewObjectDir -branch fix-stat -maxtries 1
2021-10-14 17:15:50 +01:00
Nick Craig-Wood
af732c5431 Add Logeshwaran to contributors 2021-10-14 17:15:48 +01:00
Nick Craig-Wood
14de0cfb43 Add Joda Stößer to contributors 2021-10-14 17:14:53 +01:00
albertony
c2597a4fa3 docs: cleanup header levels in backend docs (#5698) 2021-10-14 15:40:18 +02:00
Logeshwaran
ceaafe6620 s3: add support to use CDN URL to download the file
The egress charges while using a CloudFront CDN url is cheaper when
compared to accessing the file directly from S3. So added a download
URL advanced option, which when set downloads the file using it.
2021-10-14 11:19:38 +01:00
Joda Stößer
d41b9b46d4 docs: improve ordered list prefix for drive.md 2021-10-14 11:08:15 +01:00
Joda Stößer
98d9ba363f .github: correct contribution link in pull request template 2021-10-14 11:07:25 +01:00
Ivan Andreev
16fb608bee hashsum: treat hash values in sum file as case insensitive
Also warn about duplicate file paths in sum files.

Fixes https://forum.rclone.org/t/rclone-check-sum/25566/45
2021-10-13 18:21:34 +03:00
Ivan Andreev
cf9b82b8db chunker: md5all must create metadata if base hash is slow
Before this patch the md5all option would skip creating metadata with
hashsum if the base filesystem provided md5, in the hope of passing it through.
However, if the base hash is slow (for example on local fs), chunker passed
the slow md5 but never reported this fact in features.

This patch makes chunker snapshot the base hashsum in metadata when md5all is
set and the base hashsum is slow, since chunker was intended to provide only
instant hashsums from the start.

Fixes #5508
2021-10-13 16:18:08 +03:00
albertony
7d66bfbb7c docs: toc styling (#5695) 2021-10-13 15:04:11 +02:00
Nolan Woods
023e32de05 lib/http: Factor password hash salt into options with default 2021-10-13 11:33:38 +01:00
Nolan Woods
b1cb41f8da lib/http: Fix bad username check in single auth secret provider 2021-10-13 11:33:38 +01:00
Nick Craig-Wood
1cb31e8cc7 crypt: fix uploads with --crypt-no-data-encryption
Before this change, when uploading to a crypt, the ObjectInfo
accidentally used the encrypted size, not the unencrypted size when
--crypt-no-data-encryption was set.

Fixes #5498
2021-10-12 17:12:41 +01:00
Ivan Andreev
1e7db7193e docs: note minimum supported docker engine 2021-10-12 13:27:20 +03:00
Ivan Andreev
7190c058a7 crypt: return wrapped object even with no-data-encryption
In the presence of no_data_encryption, Crypt's Put method used to over-optimize
and return the base object. This patch makes it return the Crypt-wrapped object instead.

Fixes #5498
2021-10-12 00:41:12 +03:00
albertony
85074f8f88 librclone: add RcloneFreeString function
See PR #5703

Based on initial work by Weng Haoyu (@wengh) in PR #5362
2021-10-11 19:10:07 +02:00
albertony
c7329d2ece docs: add section in install documentation about portable install
See #5591
2021-10-11 15:08:35 +02:00
albertony
f3e71f129c config: convert --cache-dir value to an absolute path 2021-10-11 15:08:35 +02:00
albertony
0ffdca42d5 docs: document --cache-dir flag 2021-10-11 15:08:35 +02:00
albertony
dbb6f94d95 config: make temporary directory user-configurable
See #5591
2021-10-11 15:08:35 +02:00
albertony
352f9bcd47 config: add paths command to show configured paths
See #5591
2021-10-11 15:08:35 +02:00
Nick Craig-Wood
d8886b37a6 serve sftp: update docs on host key generation 2021-10-11 10:43:16 +01:00
albertony
894a5a1a83 serve sftp: fix generation of server keys on windows 2021-10-11 10:43:16 +01:00
albertony
ada6a92c8b serve sftp: generate an Ed25519 server key as well as ECDSA and RSA 2021-10-11 10:43:16 +01:00
Nick Craig-Wood
df0b7d8eab serve sftp: generate an ECDSA server key as well as RSA
Before this fix, rclone only generated an RSA server key when the user
didn't supply a key.

However the RSA server key is being deprecated as it is now insecure.

This patch generates an ECDSA server key too which will be used in
preference over the RSA key, but the RSA key will carry on working.

Fixes #5671
2021-10-11 10:43:16 +01:00
Nick Craig-Wood
0dfffc0ed4 Add YenForYang to contributors 2021-10-11 10:43:16 +01:00
Alfonso Montero
19fc1b2a95 docs/compress: minor improvements 2021-10-09 18:22:38 +02:00
Ivan Andreev
bce395385d mount/docs: improve wording 2021-10-09 18:53:57 +03:00
albertony
a5b8fcc127 docs: align dropdown items when icons have different sizes 2021-10-09 18:49:05 +03:00
YenForYang
269f90c1e4 drive: Fix buffering for single request upload for files smaller than --drive-upload-cutoff
I discovered that `rclone` always upload in chunks of 16MiB whenever
uploading a file smaller than `--drive-upload-cutoff`. This is
undesirable since the purpose of the flag `--drive-upload-cutoff` is
to *prevent* chunking below a certain file size.

I realized that it wasn't `rclone` forcing the 16MiB chunks. The
`google-api-go-client` forces a chunk size default of
[`googleapi.DefaultUploadChunkSize`](32bf29c2e1/googleapi/googleapi.go (L55-L57))
bytes for resumable type uploads. This means that all requests that
use `*drive.Service` directly for upload without specifying a
`googleapi.ChunkSize` will be forced to use a *`resumable`*
`uploadType` (rather than `multipart`) for files less than
`googleapi.DefaultUploadChunkSize`. This is also noted directly in the
Drive API client documentation [here](https://pkg.go.dev/google.golang.org/api/drive/v3@v0.44.0#FilesUpdateCall.Media).

This fixes the problem by passing `googleapi.ChunkSize(0)` to
`Media()` method calls, which is the only way to disable chunking
completely. This is mentioned in the API docs
[here](https://pkg.go.dev/google.golang.org/api/googleapi@v0.44.0#ChunkSize).

The other alternative would be to pass
`googleapi.ChunkSize(f.opt.ChunkSize)` -- however, I'm *strongly* in
favor of *not* doing this for performance reasons. By not explicitly
passing a `googleapi.ChunkSize(0)`, we effectively allow
[`PrepareUpload()`](https://pkg.go.dev/google.golang.org/api/internal/gensupport@v0.44.0#PrepareUpload)
to create a
[`NewMediaBuffer`](https://pkg.go.dev/google.golang.org/api/internal/gensupport@v0.44.0#NewMediaBuffer)
that copies the original `io.Reader` passed to `Media()` in order to
check that its size is less than `ChunkSize`, which will unnecessarily
consume time and memory.

`minChunkSize` is also changed to be `googleapi.MinUploadChunkSize`,
as it is something specified upstream that we have no control over.
2021-10-08 15:29:38 +01:00
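A sketch of the fix using the real google.golang.org/api packages (service setup and file names are illustrative):

```go
package main

import (
	"context"
	"log"
	"os"

	"google.golang.org/api/drive/v3"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := drive.NewService(ctx) // credentials from the environment
	if err != nil {
		log.Fatal(err)
	}
	in, err := os.Open("small-file.txt") // hypothetical small file
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()
	// googleapi.ChunkSize(0) disables the SDK's resumable chunking so
	// the upload goes as a single multipart request instead of 16MiB
	// resumable chunks.
	_, err = svc.Files.Create(&drive.File{Name: "small-file.txt"}).
		Media(in, googleapi.ChunkSize(0)).
		Do()
	if err != nil {
		log.Fatal(err)
	}
}
```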
Ivan Andreev
7a1cab57b6 cmd/hashsum: dont put ERROR or UNSUPPORTED in output 2021-10-08 14:26:27 +03:00
Ankur Gupta
c8d5606f2c Removed multiple emails for Ankur Gupta 2021-10-08 10:25:42 +01:00
Ivan Andreev
a2545066e2 drive: constrain list by filter #5023
Google Drive API allows for clauses like "modifiedTime > '2012-06-04T12:00:00'"
in the query param, so the filter flags --max-age and --min-age can be applied
directly at the directory listing phase rather than in a filter.
This is extremely helpful when we want to do an incremental backup of a remote
drive with many files but the number of recently changed files is small.

Co-authored-by: fotile96 <fotile96@users.noreply.github.com>
2021-10-07 22:11:22 +03:00
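A sketch of what such a listing-time clause looks like (the cutoff value and query are illustrative; rclone builds the real query internally):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// --max-age 24h becomes a modifiedTime lower bound in the query
	cutoff := time.Now().Add(-24 * time.Hour).UTC().Format("2006-01-02T15:04:05")
	query := fmt.Sprintf("trashed = false and modifiedTime > '%s'", cutoff)
	fmt.Println(query)
	// passed to the Drive API as the q parameter,
	// e.g. svc.Files.List().Q(query)
}
```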
Ivan Andreev
729704bcb8 serve/docker: fix octal umask 2021-10-07 22:02:27 +03:00
Nick Craig-Wood
8b4a89d34b Update github.com/ncw/swift to v2.0.1 2021-10-07 12:02:09 +01:00
Ivan Andreev
15a9816512 ftp: update encoding in integration tests with ProFtpd, PureFtpd, VsFtpd
PR #5589 established recommended encodings to use with major FTP servers.
This patch updates integration tests correspondingly.
2021-10-05 21:45:08 +03:00
Ivan Andreev
cace18d89a docs/ftp: state clearly that active mode is not supported 2021-10-05 15:52:50 +03:00
Ivan Andreev
a065fb23e5 mount: document the mount helper mode, make command docs - #5594 2021-10-03 01:44:08 +03:00
Ivan Andreev
a95c7a001e core: run rclone as mount helper - #5594 2021-10-03 01:44:08 +03:00
Ivan Andreev
ffa1b1a258 config: enable verbose logging by the --verbose argument - #5594 2021-10-03 01:44:08 +03:00
Ivan Andreev
8b8a943dd8 mountlib: correctly daemonize for compatibility with automount - #5593
This patch will:
- add --daemon-wait flag to control the time to wait for background mount
- remove dependency on sevlyar/go-daemon and implement backgrounding directly
- avoid setsid during backgrounding as it can result in race under Automount
- provide a fallback PATH to correctly run `fusermount` under systemd as it
  runs mount units without standard environment variables
- correctly handle ^C pressed while the background process is being set up
2021-10-02 23:45:16 +03:00
Ivan Andreev
8c10dee510 mountlib: use procfs to validate mount on linux - #5593
The current way of checking whether a mountpoint has already been mounted (listing
the directory) can result in a race if rclone runs under Automount (classic or systemd).

This patch adopts the Linux ProcFS for the check. Note that a mountpoint is considered
empty if it's tagged as "mounted" by autofs. ProcFS is also used to check whether the
rclone mount was successful (i.e. tagged by a string containing "rclone").

On macOS/BSD where ProcFS is unavailable the old method is still used.

This patch also moves a few utility functions unchanged to utils.go:
CheckOverlap, CheckAllowings, SetVolumeName.
2021-10-02 23:45:16 +03:00
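A minimal sketch of the ProcFS check (field layout per proc(5); an entry of type "autofs" means the mountpoint is managed by autofs but not yet really mounted):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// findMount scans /proc/self/mountinfo for mountpoint and returns the
// filesystem type, e.g. "fuse.rclone" once rclone has mounted it.
func findMount(mountpoint string) (fstype string, found bool) {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return "", false // e.g. macOS/BSD - fall back to the old check
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		// field 5 is the mount point; the fs type is the first field
		// after the "-" separator
		if len(fields) < 7 || fields[4] != mountpoint {
			continue
		}
		for i := 6; i < len(fields)-1; i++ {
			if fields[i] == "-" {
				return fields[i+1], true
			}
		}
	}
	return "", false
}

func main() {
	fstype, mounted := findMount("/mnt/data") // hypothetical mountpoint
	fmt.Println(fstype, mounted)
}
```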
Ivan Andreev
68be24c88d log: optionally print pid in logs - #5593
This option is useful to troubleshoot `rclone mount --daemon`
2021-10-02 23:45:16 +03:00
albertony
fbc7f2e61b lib/file: improve error message when attempting to create dir on nonexistent drive on windows
This replaces built-in os.MkdirAll with a patched version that stops the recursion
when reaching the volume part of the path. The original version would continue the recursion,
and for extended-length paths end up with \\? as the top-level directory, and the error
message would then be something like:
mkdir \\?: The filename, directory name, or volume label syntax is incorrect.
2021-10-01 23:18:39 +02:00
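A sketch of the stopping condition, using the stdlib's filepath.VolumeName (behaviour shown is for Windows, where an extended-length path like `\\?\C:\a\b` has volume name `\\?\C:`):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// stopAtVolume reports whether the MkdirAll recursion should stop:
// once the path has shrunk to its volume name there is no parent
// directory left to create, so recursing further (ending up at `\\?`)
// only produces the confusing error quoted above.
func stopAtVolume(path string) bool {
	return len(path) <= len(filepath.VolumeName(path))
}

func main() {
	// on Windows: VolumeName(`\\?\C:\a\b`) == `\\?\C:`
	fmt.Println(stopAtVolume(`\\?\C:`))     // true on Windows
	fmt.Println(stopAtVolume(`\\?\C:\a\b`)) // false on Windows
}
```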
Nolan Woods
b30731c9d0 lib/http: Add auth to http service
Fixes https://github.com/rclone/rclone/issues/5620
2021-10-01 15:51:48 +01:00
albertony
26b6c83e49 docs: extend documentation on valid remote names 2021-10-01 15:18:04 +02:00
albertony
59c74ea1b8 config: support hyphen in remote name from environment variable 2021-10-01 15:18:04 +02:00
Ivan Andreev
2d05b28b0a ftp: enable CI for ProFtpd, PureFtpd, VsFtpd 2021-10-01 10:09:57 +03:00
Ivan Andreev
dc589d3070 ftp: provide valid encoding for ProFtpd, PureFtpd, VsFtpd 2021-10-01 10:09:57 +03:00
Ivan Andreev
48e7246163 lib/encoder: add encoding of square brackets 2021-10-01 10:09:57 +03:00
Ivan Andreev
69f4b48719 ftp: fix deadlock after failed update when concurrency=1 2021-10-01 10:03:59 +03:00
Nick Craig-Wood
bb0c4ad2d8 union: fix rename not working with union of local disk and bucket based remote
Before this change the union's feature flags were a strict AND of the
underlying remotes. This means that a union of a local disk (which can
Move but not Copy) and a bucket based remote (which can Copy but not
Move) could neither Move nor Copy.

This fix advertises Move in the union if all the remotes can Move or
Copy. It also implements Move as Copy+Delete (like rclone does
normally) if the underlying union does not support Move.

This enables renames to work with unions of local disk and bucket
based remotes as expected.

Fixes #5632
2021-09-30 20:09:02 +01:00
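A sketch of the fallback logic under hypothetical Mover/Copier interfaces (rclone's real union backend works on its upstream Fs objects):

```go
package main

import (
	"errors"
	"fmt"
)

type Object interface{ Remove() error }

type Mover interface {
	Move(src Object, remote string) (Object, error)
}

type Copier interface {
	Copy(src Object, remote string) (Object, error)
}

// move prefers server-side Move and otherwise falls back to Copy
// followed by deleting the source - the same trick that lets the
// union advertise Move when every upstream can Move or Copy.
func move(f interface{}, src Object, remote string) (Object, error) {
	if m, ok := f.(Mover); ok {
		return m.Move(src, remote)
	}
	c, ok := f.(Copier)
	if !ok {
		return nil, errors.New("can't move or copy")
	}
	dst, err := c.Copy(src, remote)
	if err != nil {
		return nil, err
	}
	return dst, src.Remove()
}

// toy implementations so the sketch runs
type obj struct{}

func (obj) Remove() error { return nil }

type bucketFs struct{} // can Copy but not Move, like a bucket based remote

func (bucketFs) Copy(src Object, remote string) (Object, error) { return src, nil }

func main() {
	dst, err := move(bucketFs{}, obj{}, "new/name")
	fmt.Println(dst, err) // Copy+Delete path taken
}
```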
albertony
b389b84685 jottacloud: refactor all file state checks into common functions 2021-09-30 19:34:48 +02:00
albertony
b0f06d9920 jottacloud: improved error handling with SetModTime and corrupt files in general 2021-09-30 19:34:48 +02:00
albertony
159229527d jottacloud: implement SetModTime to support modtime-only changes - #5627 2021-09-30 19:34:48 +02:00
albertony
b5a27b1c75 docs: cleanup header levels 2021-09-30 17:54:57 +02:00
albertony
db7db952c1 Add Jonta to contributors 2021-09-30 15:38:24 +02:00
Jonta
d8d621c175 docs: grammar/readability (#5633) 2021-09-30 15:34:00 +02:00
Nick Craig-Wood
0902e5c48e vfs: Ignore ECLOSED in Setattr when truncating file handles
Before this change file handles could get closed while the loop
truncating the file handles was running.

This would mean that occasionally an ECLOSED (which is translated into
EBADF by cmd/mount) would spuriously be returned if Release happened
to happen in the middle of a Truncate call (Setattr called with
size=0).

This change ignores the ECLOSED while truncating file handles.

See: https://forum.rclone.org/t/writes-to-wasabi-mount-failing-with-bad-file-descriptor-intermittently/26321
2021-09-28 11:51:41 +01:00
Nick Craig-Wood
5b6bcfc184 Add HNGamingUK to contributors 2021-09-28 11:51:41 +01:00
HNGamingUK
1409b89f6c swift: document OVH Cloud Archive - fixes #3041
Added documentation for OVH Cloud Archive, provides information on how to restore/unfreeze/download objects.
2021-09-20 17:32:13 +01:00
Fred
00c6642fad seafile: fix 2fa state machine 2021-09-18 12:44:59 +01:00
Nick Craig-Wood
badefdb060 pcloud: try harder to delete a failed upload
This fixes the integration tests when testing errored uploads
2021-09-17 10:43:45 +01:00
Nick Craig-Wood
9c2533821d pcloud: return an early error when Put is called with an unknown size
This stops the 10 minute pause in the integration tests
2021-09-17 10:43:45 +01:00
Nick Craig-Wood
c718fe4330 pcloud: fix sha256 hashes #5496
This was started in

3626f10f26 pcloud: add sha256 support - fixes #5496

But this support turned out to be incomplete and caused the
integration tests to fail.
2021-09-17 10:43:45 +01:00
Nick Craig-Wood
3298493b0b Add wzl to contributors 2021-09-17 10:43:45 +01:00
Abhinav Sharma
18f3929186 docs: update ignored email as per #5586 2021-09-12 18:46:37 +01:00
wzl
b35db61a80 docs: add a step for drive.md 2021-09-11 23:00:59 +03:00
Abhinav Sharma
3c17762c4e update the email 2021-09-11 13:08:29 +03:00
Ivan Andreev
24de896df2 build: apply gofmt from golang 1.17 2021-09-09 20:43:59 +03:00
Ivan Andreev
2bc2546d5c test: skip mount2 test on single-CPU runners 2021-09-06 15:01:44 +03:00
Tatsuya Noyori
05f128868f azureblob: add --azureblob-no-head-object 2021-09-06 10:41:54 +01:00
x0b
f7f4468cbc build: update Go to 1.16 and NDK to 22b for android/any 2021-09-03 13:32:48 +03:00
Ivan Andreev
aa0ceb6c5c cmd/version: add support for openbsd/386
After this patch the version command will be fully
supported on openbsd/amd64 and openbsd/386.
Remaining os/arch combinations stay as is.
2021-09-02 11:13:12 +03:00
albertony
f1f923a986 Change byte unit format from MiByte to MiB 2021-08-31 09:57:27 +02:00
albertony
8500d95579 test: consider global option for printing human-readable sizes and avoid unsigned integer overflow 2021-08-31 09:57:27 +02:00
albertony
8c4b06da79 tree: option to print human-readable sizes removed in favor of global option 2021-08-31 09:57:27 +02:00
albertony
6d25ba7c02 about: make human-readable output more consistent with other commands 2021-08-31 09:57:27 +02:00
albertony
774efeabf0 ncdu: introduce key u to toggle human-readable 2021-08-31 09:57:27 +02:00
albertony
d24f87c6a9 size: include human-readable count 2021-08-31 09:57:27 +02:00
albertony
721a9786a7 ls: introduce a global option to print human-readable sizes and consider it for ls commands
Fixes #1890
2021-08-31 09:57:27 +02:00
albertony
94521959f8 docs/config: remove use of backticks around words within a larger code block 2021-08-31 09:02:31 +02:00
Nick Craig-Wood
6a9ef27b09 cache: don't run failing tests on windows/386
After updating rclone's dependencies these tests started failing on
windows/386

- TestInternalDoubleWrittenContentMatches
- TestInternalMaxChunkSizeRespected

The failures look like this. The root cause is unknown. The `Wait(n=1)
would exceed context deadline` errors come from golang.org/x/time/rate
but it isn't clear what is calling them.

2021/08/20 21:57:16 ERROR : worker-0 <one>: object open failed 0: rate: Wait(n=1) would exceed context deadline
[snip ~10 duplicates]
2021/08/20 21:57:56 ERROR : tidwcm1629496636/one: (0/26) error (chunk not found 0) response
2021/08/20 21:58:02 ERROR : worker-0 <one>: object open failed 0: rate: Wait(n=1) would exceed context deadline
--- FAIL: TestInternalDoubleWrittenContentMatches (45.77s)
    cache_internal_test.go:310:
        	Error Trace:	cache_internal_test.go:310
        	Error:      	Not equal:
        	            	expected: "one content updated double"
        	            	actual  : ""

        	            	Diff:
        	            	--- Expected
        	            	+++ Actual
        	            	@@ -1 +1 @@
        	            	-one content updated double
        	            	+
        	Test:       	TestInternalDoubleWrittenContentMatches
2021/08/20 21:58:03 original size: 23592960
2021/08/20 21:58:03 updated size: 12
2021-08-20 23:28:18 +01:00
Nick Craig-Wood
09fd258b5c build: update all dependencies 2021-08-20 22:03:38 +01:00
Nick Craig-Wood
2cefae51a1 build: make go1.14 the minimum supported Go for building 2021-08-20 19:04:29 +01:00
Nick Craig-Wood
e4fb5e99ef build: use go1.17 for building 2021-08-20 19:04:29 +01:00
Nick Craig-Wood
8bd26c663a build: update golang.org/x/sys for go1.17 build 2021-08-20 19:04:29 +01:00
Nick Craig-Wood
dd97fbc55f Add Parth Shukla to contributors 2021-08-20 19:04:23 +01:00
Nick Craig-Wood
b32d00ba37 Add Justin Hellings to contributors 2021-08-20 19:04:23 +01:00
albertony
3a2f748aeb vfs: ensure names used in cache path are legal on current os
Fixes #5360
2021-08-19 20:14:50 +02:00
albertony
18be4ad10d vfs: fix issue where empty dirs would build up in cache meta dir 2021-08-19 20:14:50 +02:00
albertony
9a2811f0b2 local: refactor default os encoding out from local backend into shared encoder lib 2021-08-19 20:14:50 +02:00
albertony
63708d73be docs/vfs: Merge duplicate chunked reading documentation from mount docs 2021-08-19 19:29:41 +02:00
Parth Shukla
60323dc5e2 googlephotos: Use encoder for album names 2021-08-19 16:38:31 +01:00
Justin Hellings
359648e002 docs: Removed ambiguity from copy command docs
Switched from talking about "unchanged" files to "identical" files.

I found out the hard way that rclone copy will overwrite newer files.
Looking at posts in the rclone forum, this is a common experience.

The docs for copy have referred to "unchanged" files.
This is ambiguous because it intuitively introduces a sense
of chronology, but chronology is irrelevant.
Rclone only "cares" about difference, not change.
2021-08-19 16:34:57 +01:00
Ivan Andreev
e45c23ab79 cmd/version: add support for openbsd/amd64
After this patch the version command will be
- fully supported on openbsd/amd64
- a stub on openbsd/i386 until we deprecate go 1.17
Remaining os/arch combinations stay as is.
2021-08-16 11:39:34 +03:00
Nick Craig-Wood
890b6a45b5 sugarsync: fix initial connection after config re-arrangement - Fixes #5525
In this commit the config system was re-arranged

    94dbfa4ea fs: change Config callback into state based callback #3455

This passed the password as a temporary config parameter but forgot to
reveal it in the API call.
2021-08-14 12:53:36 +01:00
Nick Craig-Wood
227119da16 Add Ken Enrique Morel to contributors 2021-08-14 12:53:36 +01:00
Ken Enrique Morel
3626f10f26 pcloud: add sha256 support - fixes #5496 2021-08-14 12:48:25 +01:00
negative0
82ad9a30b9 rc: fix speed does not update in core/stats 2021-08-14 12:45:51 +01:00
Ivan Andreev
448a03181f cmd/mount: --fast-list does nothing on a mount 2021-08-13 21:11:56 +03:00
Ivan Andreev
3615619645 serve/docker: retry saveState to fix sporadic test failure on macOS/Windows 2021-08-13 21:00:21 +03:00
Nick Craig-Wood
33ddd540b6 accounting: fix maximum bwlimit by scaling max token bucket size
Before this fix, on Windows, the --bwlimit would max out at 2.5Gbps
even when set to 10 Gbps.

This turned out to be because of the maximum token bucket size.

This fix scales up the token bucket size linearly above a bwlimit of
2Gbps.

Fixes #5507
2021-08-13 16:55:24 +01:00
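A sketch using golang.org/x/time/rate, where the limit lives (the baseline burst and the 2 Gbit/s threshold mirror the commit text; the exact constants in rclone may differ):

```go
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

const (
	defaultBurst = 4 * 1024 * 1024 // illustrative baseline bucket size in bytes
	scaleLimit   = 250_000_000     // 2 Gbit/s in bytes/s
)

// newLimiter scales the token bucket linearly above 2 Gbit/s so the
// limiter can actually sustain rates like 10 Gbit/s; with a fixed
// burst it topped out around 2.5 Gbit/s.
func newLimiter(bytesPerSec int) *rate.Limiter {
	burst := defaultBurst
	if bytesPerSec > scaleLimit {
		burst = defaultBurst * (bytesPerSec / scaleLimit)
	}
	return rate.NewLimiter(rate.Limit(bytesPerSec), burst)
}

func main() {
	l := newLimiter(10 * 125_000_000) // --bwlimit equivalent of 10 Gbit/s
	fmt.Println(l.Burst())            // 5x the baseline burst
}
```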
Nick Craig-Wood
a5f277f47e vfs: fix crash when truncating a just uploaded object - Fixes #5522 2021-08-11 11:55:09 +01:00
Nick Craig-Wood
bec253fd39 Add vinibali to contributors 2021-08-11 11:55:09 +01:00
Nick Craig-Wood
815a6ac8aa Add hota to contributors 2021-08-11 11:55:09 +01:00
Ivan Andreev
8106f65e0b Add yedamo to contributors 2021-08-11 11:07:13 +03:00
yedamo
96f77ebe5a selfupdate: fix --quiet option, not quite quiet
Fixes #5505
2021-08-11 10:14:19 +03:00
Greg Sadetsky
36f0231082 docs/drive: Fix lsf example without drive-impersonate (#5504) 2021-08-10 21:59:36 +02:00
albertony
168cb65c61 Add Greg Sadetsky to contributors 2021-08-10 21:50:26 +02:00
Greg Sadetsky
e00db968aa docs/s3: fix typo in s3 documentation (#5515) 2021-08-10 21:45:49 +02:00
partev
bb6b44d199 DOC: "OS X" -> "macOS" 2021-08-10 10:12:30 +03:00
vinibali
88b35bc32d Update yandex.md
add mail subscription exception
2021-08-09 23:28:41 +03:00
Nathan Collins
c32d5dd1f3 fs: move with --ignore-existing will not delete skipped files - #5463 2021-08-01 17:46:45 +01:00
Greg Sadetsky
3d9da896d2 drive: fix instructions for auto config #5499 2021-08-01 15:17:07 +01:00
hota
839c20bb35 s3: add Wasabi's AP-Northeast endpoint info
* Wasabi starts to provide AP Northeast (Tokyo) endpoint for all customers, so add it to the list

Signed-off-by: lindwurm <lindwurm.q@gmail.com>
2021-08-01 14:56:52 +01:00
Nick Craig-Wood
7c58148840 Start v1.57.0-DEV development 2021-08-01 13:43:36 +01:00
Nick Craig-Wood
6545755758 sftp: remove spurious error message on --sftp-disable-concurrent-reads 2021-07-31 11:04:45 +01:00
Nick Craig-Wood
c86a55c798 vfs: fix duplicates on rename - fixes #5469
Before this change, if there was an existing file being uploaded when
a file was renamed on top of it, then both would be uploaded. This
causes a duplicate in Google Drive as both files get uploaded at the
same time. This was triggered reliably by LibreOffice saving doc
files.

This fix removes any duplicates in the upload queue on rename.
2021-07-30 19:31:02 +01:00
Nick Craig-Wood
1d280081d4 Add Mariano Absatz (git) to contributors 2021-07-30 19:31:02 +01:00
Nick Craig-Wood
f48cb5985f Add Justin Winokur (Jwink3101) to contributors 2021-07-30 19:31:02 +01:00
Ivan Andreev
55e766f4e8 mountlib: restore daemon mode after #5415 2021-07-29 13:35:04 +03:00
Alex Chen
63a24255f8 onedrive: handle HTTP 400 better in PublicLink() (#5419) 2021-07-27 17:55:57 +08:00
Cnly
bc74f0621e http: fix serve http exits directly after starting 2021-07-25 14:06:43 +01:00
Mariano Absatz (git)
f39a08c9d7 clarification of the process for creating custom client_id 2021-07-24 09:19:48 +03:00
Justin Winokur (Jwink3101)
675548070d fs/operations: add rmdirs -v output - fixes #5464 2021-07-24 09:16:23 +03:00
Nick Craig-Wood
37ff05a5fa Version v1.56.0 2021-07-20 19:45:41 +01:00
Nick Craig-Wood
c67c1ab4ee test makefiles: fix documentation so it doesn't have HTML in 2021-07-20 19:37:09 +01:00
Nick Craig-Wood
76f8095bc5 hdfs: fix documentation so it doesn't have HTML in 2021-07-20 19:36:30 +01:00
Nick Craig-Wood
f646cd0a2a librclone: add missing sync/* rc methods
See: https://forum.rclone.org/t/missing-directory-copy-move-methods-in-librclone/24503
2021-07-20 16:59:02 +01:00
Nick Craig-Wood
d38f6bb0ab gphotos: fix read only scope not being used properly
Before this change the read only scope was being ignored and rclone
was asking for a read-write scope.

https://forum.rclone.org/t/google-photos-copy-sync-errors/25153
2021-07-20 16:57:55 +01:00
Nick Craig-Wood
11d86c74b2 docs: expand contents and make docs full screen 2021-07-20 16:53:21 +01:00
Nick Craig-Wood
feb6046a8a docs: add table of contents to every page 2021-07-20 16:53:21 +01:00
Nick Craig-Wood
807102ada2 drive: fix config system overwriting team drive ID - fixes #5454 2021-07-20 16:51:59 +01:00
Nick Craig-Wood
770b3496a1 config: fix in memory config not saving on the fly backend config
Before this fix, saving a :backend config gave the error

    Can't save config "token" = "XXX" for on the fly backend ":backend"

Even when using the in-memory config `--config ""`

This fixes the problem by
- always using the in memory config if it is configured
- moving the check for a :backend config save to the file config backend

It also removes the contents of the config items being saved from the
log, which prevents confidential tokens from being logged.

Fixes #5451
2021-07-20 12:09:38 +01:00
buengese
da36ce08e4 docs/jottacloud: add short note on how no versions option works 2021-07-15 17:29:30 +02:00
buengese
8652cfe575 jottacloud: add no versions option 2021-07-15 17:29:30 +02:00
Nick Craig-Wood
94b1439299 drive: fix some google docs being treated as files - fixes #5455
At some point some google docs files started having sizes returned in
their listing information.

This then caused rclone to treat the docs as files which caused
downloads to fail.

The API docs now state that google docs may have sizes (whereas I'm
pretty sure they didn't earlier).

This fix removes the check for size, so google docs are identified
solely by not having an MD5 checksum.
2021-07-14 11:40:58 +01:00
Nick Craig-Wood
97c9e55ddb Add Antoine GIRARD to contributors 2021-07-14 11:40:57 +01:00
Ivan Andreev
c0b2832509 docs: serve docker: fix URL of systemd contrib files (#5415) 2021-07-11 13:23:00 +03:00
Ivan Andreev
7436768d62 docs for serve docker and docker plugin (#5415) 2021-07-10 23:56:09 +03:00
Ivan Andreev
55153403aa build docker plugin (#5415) 2021-07-10 23:56:09 +03:00
Antoine GIRARD
daf449b5f2 cmd/serve: add serve docker command (#5415)
Fixes #4750

Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
2021-07-10 23:56:09 +03:00
Antoine GIRARD
221dfc3882 mountlib: refactor before adding serve docker (#5415)
Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
2021-07-10 23:56:09 +03:00
Nick Craig-Wood
aab29353d1 Update email address for Serge Pouliquen 2021-07-08 12:49:13 +01:00
Nick Craig-Wood
c24504b793 Add Chuan Zh to contributors 2021-07-08 12:47:35 +01:00
Nick Craig-Wood
6338d0026e Add Michael Hanselmann to contributors 2021-07-08 12:47:35 +01:00
Chuan Zh
ba836d45ff s3: update Alibaba OSS endpoints 2021-07-08 12:03:04 +01:00
Ole Frost
367cf984af docs: added tip to reduce SharePoint throttling - fixes #5404 2021-07-08 11:39:52 +01:00
Michael Hanselmann
6b7d7d0441 atexit: Terminate with non-zero status after receiving signal
When rclone received a SIGINT (Ctrl+C) or SIGTERM signal while an atexit
function is registered it always terminated with status code 0. Unix
convention is to exit with a non-zero status code. Often it's
`128 + int(signum)`, but at least not zero.

With this change fatal signals handled by the `atexit` package cause
a non-zero exit code. On Unix systems it's `128 + int(signum)` while
on other systems, such as Windows, it's always 2 ("error not otherwise
categorised").

Resolves #5437.

Signed-off-by: Michael Hanselmann <public@hansmi.ch>
2021-07-07 17:59:26 +01:00
Michael Hanselmann
cf19073ac9 cmd: Move exit status codes to separate package
Signal handling by the `atexit` package needs access to
`exitCodeUncategorizedError`. With this change all exit status values
are moved to a dedicated package so that they can be reused.

Signed-off-by: Michael Hanselmann <public@hansmi.ch>
2021-07-07 17:59:26 +01:00
Nick Craig-Wood
ba5c559fec fs/sync: fix tests by only --compare-dest timestamp if have hash
This fixes the integration test errors introduced in #5410
2021-07-07 16:59:51 +01:00
Nick Craig-Wood
abb8fe8ba1 Add Haochen Tong to contributors 2021-07-07 16:59:51 +01:00
Nick Craig-Wood
765af387e6 Add Dmitry Sitnikov to contributors 2021-07-07 16:59:51 +01:00
Nick Craig-Wood
d05cf6aba8 Add partev to contributors 2021-07-07 16:59:51 +01:00
Nick Craig-Wood
76a3fef24d Add Xuanchen Wu to contributors 2021-07-07 16:59:51 +01:00
Ivan Andreev
b40d9bd4c4 cmd: add hashSUM file support (#5352)
Currently rclone check supports matching two file trees by sizes and hashes.
This change adds support for SUM files produced by GNU utilities like sha1sum.

Fixes #1005 

Note: `checksum` checks by default, while `hashsum` prints sums by default.
The new flag is named "--checkfile" but carries the hash name.
Summary of introduced command forms:

```
rclone check sums.sha1 remote:path --checkfile sha1
rclone checksum sha1 sums.sha1 remote:path             
rclone hashsum sha1 remote:path --checkfile sums.sha1
rclone sha1sum remote:path --checkfile sums.sha1
rclone md5sum remote:path --checkfile sums.md5
```
2021-07-07 18:34:16 +03:00
Ivan Andreev
4680c0776d backend/local: skip entries removed concurrently with List() (#5297)
This change fixes the bug described below:
if a file is removed while the local backend List() runs,
the call will flag an accounting error.
The bug manifests itself if the local backend is the sync target,
due to intrinsic concurrency.
The odds of hitting this bug depend on --checkers and --transfers.
Chunker over the local backend is affected even more, because
updating a composite object with smaller content
translates into removing chunks on the underlying file system
and involves a number of List() calls.
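
A hedged sketch of the skip logic (hypothetical helper and package,
not the actual backend code):

```go
package local

import (
	"errors"
	"io/fs"
	"os"
)

// listDir stats each directory entry and silently skips entries that
// vanished between the directory read and the stat, instead of
// flagging an accounting error.
func listDir(dir string) ([]fs.FileInfo, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var infos []fs.FileInfo
	for _, entry := range entries {
		info, err := entry.Info()
		if errors.Is(err, fs.ErrNotExist) {
			continue // removed concurrently with the listing
		}
		if err != nil {
			return nil, err
		}
		infos = append(infos, info)
	}
	return infos, nil
}
```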
2021-07-07 16:50:19 +03:00
buengese
fb305b5976 fichier: check that we actually got a download token and retry if we didn't 2021-07-06 14:58:50 +02:00
Ole Frost
5e91b93e59 cmdtest: end-to-end test for commands, flags and environment variables
There was no easy way to automatically test the end-to-end functionality
of commands, flags, environment variables etc.

The need for end-to-end testing was highlighted by the issues fixed
in #5341. There was no automated test to continually verify current
behaviour, nor a framework to quickly test the correctness of the fixes.

This change adds an end-to-end testing framework in the cmdtest folder.
It has some simple examples in func TestCmdTest in cmdtest_test.go. The
tests should be readable by anybody familiar with rclone and look like
this:

    // Test the rclone version command with debug logging (-vv)
    out, err = rclone("version", "-vv")
    if assert.NoError(t, err) {
        assert.Contains(t, out, "rclone v")
        assert.Contains(t, out, "os/version:")
        assert.Contains(t, out, " DEBUG : ")
    }

The end-to-end tests are executed just like the Go unit tests, that is:

    go test ./cmdtest -v

The change also contains a thorough test of environment variables in
environment_test.go.

Thanks to @ncw for encouragement and introduction to the TestMain trick.
2021-07-05 16:38:20 +01:00
Ole Frost
58c99427b3 config: fixed issues with flags/options set by environment vars.
Some environment variables didn’t behave like their corresponding
command line flags. The affected flags were --stats, --log-level,
--separator, --multi-thread-streams, --rc-addr, --rc-user and --rc-pass.
Example:

    RCLONE_STATS='10s'
    rclone check remote: remote: --progress
    # Expected: rclone check remote: remote: --progress --stats=10s
    # Actual: rclone check remote: remote: --progress

Remote-specific options set by environment variables were overruled by
less specific backend options set by environment variables. Example:

    RCLONE_DRIVE_USE_TRASH='false'
    RCLONE_CONFIG_MYDRIVE_USE_TRASH='true'
    rclone deletefile myDrive:my-test-file
    # Expected: my-test-file is recoverable in the trash folder
    # Actual: my-test-file is permanently deleted (not recoverable)

Backend-specific options set by environment variables were overruled by
general backend options set by environment variables. Example:

    RCLONE_SKIP_LINKS='true'
    RCLONE_LOCAL_SKIP_LINKS='false'
    rclone lsd local:
    # Expected result: Warnings when symlinks are skipped
    # Actual result: No warnings when symlinks are skipped
    # That is RCLONE_SKIP_LINKS takes precedence

The above issues have been fixed.

The debug logging (-vv) has been enhanced to show when flags are set by
environment variables.

The documentation has been enhanced with details on the precedence of
configuration options.

See pull request #5341 for more information.
2021-07-05 16:38:20 +01:00
albertony
fee0abf513 docs: add note about use of user and logname environment variables for current username 2021-07-05 16:31:16 +01:00
Nick Gaya
40024990b7 fs/operations: Don't update timestamps of files in --compare-dest 2021-07-05 16:29:44 +01:00
Haochen Tong
04aa6969a4 accounting: calculate rolling average speed 2021-07-05 16:27:33 +01:00
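
For context, one way a rolling average speed can be computed is an
exponentially weighted mean; the sketch below is illustrative only,
not the code from this commit:

```go
package main

import (
	"fmt"
	"time"
)

// rollingSpeed keeps an exponentially weighted average in bytes/s.
type rollingSpeed struct {
	avg float64
}

// update folds a new measurement into the average, weighting recent
// samples more heavily so short bursts don't dominate the estimate.
func (r *rollingSpeed) update(bytes int64, dt time.Duration) {
	const weight = 0.1 // fraction contributed by the newest sample
	sample := float64(bytes) / dt.Seconds()
	if r.avg == 0 {
		r.avg = sample
		return
	}
	r.avg = (1-weight)*r.avg + weight*sample
}

func main() {
	var r rollingSpeed
	r.update(1<<20, time.Second)   // 1 MiB in 1s
	r.update(512<<10, time.Second) // 512 KiB in 1s
	fmt.Printf("average: %.0f bytes/s\n", r.avg)
}
```
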
Haochen Tong
d2050523de accounting: fix startTime of statsGroups.sum 2021-07-05 16:27:33 +01:00
Ivan Andreev
1cc6dd349e Add google search widget to rclone.org 2021-07-05 16:21:36 +01:00
Ole Frost
721bae11c3 docs: ease contribution for beginners in Go, Git and GitHub
Improved/added steps to:
 * Install Git with basic setup
 * Use both SSH and HTTPS for the git origin
 * Install Go and verify the GOPATH
 * Update the forked master
 * Find a popular editor for Go
2021-07-05 16:03:53 +01:00
Dmitry Sitnikov
b439199578 azureblob: Fix typo in Azure Blob help
Change the command used to create the RBAC file to the correct one
`az ad sp create-for-rbac`
Add the link to the command documentation
https://docs.microsoft.com/en-us/cli/azure/ad/sp?view=azure-cli-latest#az_ad_sp_create_for_rbac
2021-07-05 15:58:41 +01:00
partev
0bfd6f793b docs: replace OSX with macOS 2021-07-05 14:51:00 +01:00
Nick Craig-Wood
76ea716abf ftp: make upload error 250 indicate success
Some servers seem to send return code 250 to indicate successful
upload - previously rclone was treating this as an error.
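
A hedged sketch of the accepted reply codes after this change (the
helper is made up for illustration):

```go
// 250 ("requested file action okay") now counts as a successful
// upload alongside the usual 226 ("closing data connection").
func uploadSucceeded(replyCode int) bool {
	return replyCode == 226 || replyCode == 250
}
```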

See: https://forum.rclone.org/t/transfer-on-mega-in-ftp-mode-is-not-working/24642/
2021-07-05 10:35:02 +01:00
Alex Chen
e635f4c0be fs: make --dump imply -vv (#5418) 2021-06-23 00:32:26 +08:00
Xuanchen Wu
0cb973f127 onedrive: Make link return direct download link (#5417)
Co-authored-by: Cnly <minecnly@gmail.com>
2021-06-22 21:25:08 +08:00
Alex Chen
96ace599a8 fs: fix logging level mentioned in docs of Logf 2021-06-21 23:30:26 +08:00
1127 changed files with 86399 additions and 44952 deletions

@@ -9,7 +9,7 @@ We understand you are having a problem with rclone; we want to help you with tha
 **STOP and READ**
 **YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
-Please show the effort you've put in to solving the problem and please be specific.
+Please show the effort you've put into solving the problem and please be specific.
 People are volunteering their time to help! Low effort posts are not likely to get good answers!
 If you think you might have found a bug, try to replicate it with the latest beta (or stable).

@@ -22,7 +22,7 @@ Link issues and relevant forum posts here.
 #### Checklist

-- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
+- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-new-feature-or-bug-fix).
 - [ ] I have added tests for all changes in this PR if appropriate.
 - [ ] I have added documentation for the changes if appropriate.
 - [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).

@@ -25,12 +25,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
+        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.15', 'go1.16']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.16.x'
+            go: '1.17.x'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -41,7 +41,7 @@ jobs:
           - job_name: mac_amd64
             os: macOS-latest
-            go: '1.16.x'
+            go: '1.17.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -50,14 +50,14 @@ jobs:
           - job_name: mac_arm64
             os: macOS-latest
-            go: '1.16.x'
+            go: '1.17.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows_amd64
             os: windows-latest
-            go: '1.16.x'
+            go: '1.17.x'
             gotags: cmount
             build_flags: '-include "^windows/amd64" -cgo'
             build_args: '-buildmode exe'
@@ -67,7 +67,7 @@ jobs:
           - job_name: windows_386
             os: windows-latest
-            go: '1.16.x'
+            go: '1.17.x'
             gotags: cmount
             goarch: '386'
             cgo: '1'
@@ -78,28 +78,23 @@ jobs:
           - job_name: other_os
             os: ubuntu-latest
-            go: '1.16.x'
+            go: '1.17.x'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.13
-            os: ubuntu-latest
-            go: '1.13.x'
-            quicktest: true
-
-          - job_name: go1.14
-            os: ubuntu-latest
-            go: '1.14.x'
-            quicktest: true
-            racequicktest: true
-
           - job_name: go1.15
             os: ubuntu-latest
             go: '1.15.x'
             quicktest: true
             racequicktest: true

+          - job_name: go1.16
+            os: ubuntu-latest
+            go: '1.16.x'
+            quicktest: true
+            racequicktest: true

     name: ${{ matrix.job_name }}
     runs-on: ${{ matrix.os }}
@@ -202,13 +197,6 @@ jobs:
             librclone/python/test_rclone.py
         if: matrix.librclonetest

-      - name: Code quality test
-        shell: bash
-        run: |
-          make build_dep
-          make check
-        if: matrix.check

       - name: Compile all architectures test
         shell: bash
         run: |
@@ -228,110 +216,126 @@ jobs:
         # Deploy binaries if enabled in config && not a PR && not a fork
         if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

+  lint:
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
+    timeout-minutes: 30
+    name: "lint"
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Code quality test
+        uses: golangci/golangci-lint-action@v2
+        with:
+          # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
+          version: latest

   android:
     if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 30
     name: "android-all"
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v2
         with:
           fetch-depth: 0
       # Upgrade together with NDK version
-      - name: Set up Go 1.14
+      - name: Set up Go 1.16
         uses: actions/setup-go@v1
         with:
-          go-version: 1.14
+          go-version: 1.16
       # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
       - name: Force NDK version
-        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
+        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
       - name: Go module cache
         uses: actions/cache@v2
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
           restore-keys: |
             ${{ runner.os }}-go-
       - name: Set global environment variables
         shell: bash
         run: |
           echo "VERSION=$(make version)" >> $GITHUB_ENV
       - name: build native rclone
         run: |
           make
       - name: install gomobile
         run: |
           go get golang.org/x/mobile/cmd/gobind
           go get golang.org/x/mobile/cmd/gomobile
           env PATH=$PATH:~/go/bin gomobile init
       - name: arm-v7a gomobile build
         run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
       - name: arm-v7a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm' >> $GITHUB_ENV
           echo 'GOARM=7' >> $GITHUB_ENV
           echo 'CGO_ENABLED=1' >> $GITHUB_ENV
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
       - name: arm-v7a build
         run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
       - name: arm64-v8a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm64' >> $GITHUB_ENV
           echo 'CGO_ENABLED=1' >> $GITHUB_ENV
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
       - name: arm64-v8a build
         run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
       - name: x86 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=386' >> $GITHUB_ENV
           echo 'CGO_ENABLED=1' >> $GITHUB_ENV
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
       - name: x86 build
         run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
       - name: x64 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=amd64' >> $GITHUB_ENV
           echo 'CGO_ENABLED=1' >> $GITHUB_ENV
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
       - name: x64 build
         run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
       - name: Upload artifacts
         run: |
           make ci_upload
         env:
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
         # Upload artifacts if not a PR && not a fork
         if: github.head_ref == '' && github.repository == 'rclone/rclone'

@@ -32,3 +32,28 @@ jobs:
       publish: true
       dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
       dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
+
+  build_docker_volume_plugin:
+    if: github.repository == 'rclone/rclone'
+    needs: build
+    runs-on: ubuntu-latest
+    name: Build docker plugin job
+    steps:
+      - name: Checkout master
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      - name: Build and publish docker plugin
+        shell: bash
+        run: |
+          VER=${GITHUB_REF#refs/tags/}
+          PLUGIN_USER=rclone
+          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
+            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
+          for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do
+            export PLUGIN_USER PLUGIN_ARCH
+            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
+            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
+          done
+          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
+          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

.gitignore (2 lines changed)

@@ -13,3 +13,5 @@ rclone.iml
 fuzz-build.zip
 *.orig
 *.rej
+Thumbs.db
+__pycache__

@@ -5,7 +5,7 @@ linters:
   - deadcode
   - errcheck
   - goimports
-  - golint
+  - revive
   - ineffassign
   - structcheck
   - varcheck
@@ -24,3 +24,7 @@ issues:
   # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
   max-same-issues: 0
+
+run:
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  timeout: 10m

@@ -12,95 +12,164 @@ When filing an issue, please include the following information if
 possible as well as a description of the problem. Make sure you test
 with the [latest beta of rclone](https://beta.rclone.org/):

-* Rclone version (e.g. output from `rclone -V`)
-* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
+* Rclone version (e.g. output from `rclone version`)
+* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
 * The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
 * A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
   * if the log contains secrets then edit the file with a text editor first to obscure them

-## Submitting a pull request ##
+## Submitting a new feature or bug fix ##

 If you find a bug that you'd like to fix, or a new feature that you'd
 like to implement then please submit a pull request via GitHub.

-If it is a big feature then make an issue first so it can be discussed.
+If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.

-You'll need a Go environment set up with GOPATH set. See [the Go
-getting started docs](https://golang.org/doc/install) for more info.
-
-First in your web browser press the fork button on [rclone's GitHub
+To prepare your pull request first press the fork button on [rclone's GitHub
 page](https://github.com/rclone/rclone).

-Now in your terminal
+Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
+
+Next open your terminal, change directory to your preferred folder and initialise your local rclone project:

     git clone https://github.com/rclone/rclone.git
     cd rclone
     git remote rename origin upstream
+    # if you have SSH keys setup in your GitHub account:
     git remote add origin git@github.com:YOURUSER/rclone.git
-    go build
+    # otherwise:
+    git remote add origin https://github.com/YOURUSER/rclone.git

-Make a branch to add your new feature
+Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
+
+Now [install Go](https://golang.org/doc/install) and verify your installation:
+
+    go version
+
+Great, you can now compile and execute your own version of rclone:
+
+    go build
+    ./rclone version
+
+(Note that you can also replace `go build` with `make`, which will include a
+more accurate version number in the executable as well as enable you to specify
+more build options.)
+
+Finally make a branch to add your new feature

     git checkout -b my-new-feature

 And get hacking.

-When ready - run the unit tests for the code you changed
+You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
+
+When ready - test the affected functionality and run the unit tests for the code you changed

+    cd folder/with/changed/files
     go test -v

 Note that you may need to make a test remote, e.g. `TestSwift` for some
 of the unit tests.

-Note the top level Makefile targets
-
-* make check
-* make test
-
-Both of these will be run by Travis when you make a pull request but
-you can do this yourself locally too. These require some extra go
-packages which you can install with
-
-* make build_dep
+This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.

 Make sure you

+* Add [unit tests](#testing) for a new feature.
 * Add [documentation](#writing-documentation) for a new feature.
-* Follow the [commit message guidelines](#commit-messages).
-* Add [unit tests](#testing) for a new feature
-* squash commits down to one per feature
-* rebase to master with `git rebase master`
+* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).

-When you are done with that
+When you are done with that push your changes to Github:

     git push -u origin my-new-feature

-Go to the GitHub website and click [Create pull
+and open the GitHub website to [create your pull
 request](https://help.github.com/articles/creating-a-pull-request/).

-You patch will get reviewed and you might get asked to fix some stuff.
-If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
-
-```
-git log # See how many commits you want to squash
-git reset --soft HEAD~2 # This squashes the 2 latest commits together.
-git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
-git commit # Add a new commit message.
-git push --force # Push the squashed commit to your GitHub repo.
-# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
-```
+Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
+
+You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).

-## CI for your fork ##
+## Using Git and Github ##
+
+### Committing your changes ###
+
+Follow the guideline for [commit messages](#commit-messages) and then:
+
+    git checkout my-new-feature      # To switch to your branch
+    git status                       # To see the new and changed files
+    git add FILENAME                 # To select FILENAME for the commit
+    git status                       # To verify the changes to be committed
+    git commit                       # To do the commit
+    git log                          # To verify the commit. Use q to quit the log
+
+You can modify the message or changes in the latest commit using:
+
+    git commit --amend
+
+If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
+
+### Replacing your previously pushed commits ###
+
+Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
+
+Your previously pushed commits are replaced by:
+
+    git push --force origin my-new-feature
+
+### Basing your changes on the latest master ###
+
+To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
+
+    git checkout master
+    git fetch upstream
+    git merge --ff-only
+    git push origin --follow-tags  # optional update of your fork in GitHub
+    git checkout my-new-feature
+    git rebase master
+
+If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
+
+### Squashing your commits ###
+
+To combine your commits into one commit:
+
+    git log                  # To count the commits to squash, e.g. the last 2
+    git reset --soft HEAD~2  # To undo the 2 latest commits
+    git status               # To check everything is as expected
+
+If everything is fine, then make the new combined commit:
+
+    git commit               # To commit the undone commits as one
+
+otherwise, you may roll back using:
+
+    git reflog               # To check that HEAD{1} is your previous state
+    git reset --soft 'HEAD@{1}'  # To roll back to your previous state
+
+If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
+
+Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
+
+### GitHub Continuous Integration ###

 rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.

 ## Testing ##

+### Quick testing ###
+
 rclone's tests are run from the go testing framework, so at the top
 level you can run this to run all the tests.

     go test -v ./...

+You can also use `make`, if supported by your platform
+
+    make quicktest
+
+The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
+
+### Backend testing ###
+
 rclone contains a mixture of unit tests and integration tests.
 Because it is difficult (and in some respects pointless) to test cloud
 storage systems by mocking all their interfaces, rclone unit tests can
@@ -134,12 +203,19 @@ project root:

     go install github.com/rclone/rclone/fstest/test_all
     test_all -backend drive

+### Full integration testing ###
+
 If you want to run all the integration tests against all the remotes,
 then change into the project root and run

-    make check
     make test

-This command is run daily on the integration test server. You can
+The commands may require some extra go packages which you can install with
+
+    make build_dep
+
+The full integration tests are run daily on the integration test server. You can
 find the results at https://pub.rclone.org/integration-tests/

 ## Code Organisation ##
@@ -147,16 +223,17 @@ find the results at https://pub.rclone.org/integration-tests/

 Rclone code is organised into a small number of top level directories
 with modules beneath.

 * backend - the rclone backends for interfacing to cloud providers -
   * all - import this to load all the cloud providers
   * ...providers
 * bin - scripts for use while building or maintaining rclone
 * cmd - the rclone commands
   * all - import this to load all the commands
   * ...commands
+* cmdtest - end-to-end tests of commands, flags, environment variables,...
 * docs - the documentation and website
   * content - adjust these docs only - everything else is autogenerated
-    * command - these are auto generated - edit the corresponding .go file
+    * command - these are auto-generated - edit the corresponding .go file
 * fs - main rclone definitions - minimal amount of code
   * accounting - bandwidth limiting and statistics
   * asyncreader - an io.Reader which reads ahead
@@ -198,18 +275,39 @@ If you add a new general flag (not for a backend), then document it in
 alphabetical order.

 If you add a new backend option/flag, then it should be documented in
-the source file in the `Help:` field. The first line of this is used
-for the flag help, the remainder is shown to the user in `rclone
-config` and is added to the docs with `make backenddocs`.
+the source file in the `Help:` field.
+
+* Start with the most important information about the option,
+  as a single sentence on a single line.
+  * This text will be used for the command-line flag help.
+  * It will be combined with other information, such as any default value,
+    and the result will look odd if not written as a single sentence.
+  * It should end with a period/full stop character, which will be shown
+    in docs but automatically removed when producing the flag help.
+  * Try to keep it below 80 characters, to reduce text wrapping in the terminal.
+* More details can be added in a new paragraph, after an empty line (`"\n\n"`).
+  * Like with docs generated from Markdown, a single line break is ignored
+    and two line breaks creates a new paragraph.
+  * This text will be shown to the user in `rclone config`
+    and in the docs (where it will be added by `make backenddocs`,
+    normally run some time before next release).
+* To create options of enumeration type use the `Examples:` field.
+  * Each example value have their own `Help:` field, but they are treated
+    a bit different than the main option help text. They will be shown
+    as an unordered list, therefore a single line break is enough to
+    create a new list item. Also, for enumeration texts like name of
+    countries, it looks better without an ending period/full stop character.
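
For example, a minimal sketch following these conventions (the option
itself is made up; `fs.Option` and `fs.OptionExample` are rclone's types):

```go
var exampleOption = fs.Option{
	Name: "example_level",
	Help: "How carefully to run the example.\n\n" +
		"More detail can go in a second paragraph like this one.",
	Examples: []fs.OptionExample{{
		Value: "fast",
		Help:  "Run quickly", // no trailing full stop for enum help
	}, {
		Value: "careful",
		Help:  "Run carefully",
	}},
}
```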
 The only documentation you need to edit are the `docs/content/*.md`
-files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
+files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
 from those during the release process. See the `make doc` and `make
 website` targets in the Makefile if you are interested in how. You
 don't need to run these when adding a feature.

 Documentation for rclone sub commands is with their code, e.g.
-`cmd/ls/ls.go`.
+`cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
+line, without a period/full stop character at the end, as it will be
+combined unmodified with other information (such as any default value).

 Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
 for small changes in the docs which makes it very easy.
@@ -252,7 +350,7 @@ And here is an example of a longer one:

 ```
 mount: fix hang on errored upload

-In certain circumstances if an upload failed then the mount could hang
+In certain circumstances, if an upload failed then the mount could hang
 indefinitely. This was fixed by closing the read pipe after the Put
 completed. This will cause the write side to return a pipe closed
 error fixing the hang.
@@ -284,7 +382,7 @@ and `go.sum` in the same commit as your other changes.

 If you need to update a dependency then run

-    GO111MODULE=on go get -u github.com/pkg/errors
+    GO111MODULE=on go get -u golang.org/x/crypto

 Check in a single commit as above.
@@ -327,8 +425,8 @@ Research

 Getting going

 * Create `backend/remote/remote.go` (copy this from a similar remote)
-  * box is a good one to start from if you have a directory based remote
-  * b2 is a good one to start from if you have a bucket based remote
+  * box is a good one to start from if you have a directory-based remote
+  * b2 is a good one to start from if you have a bucket-based remote
 * Add your remote to the imports in `backend/all/all.go`
 * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
 * Try to implement as many optional methods as possible as it makes the remote more usable.
@@ -19,7 +19,7 @@ Current active maintainers of rclone are:

 **This is a work in progress Draft**

-This is a guide for how to be an rclone maintainer. This is mostly a writeup of what I (@ncw) attempt to do.
+This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.

 ## Triaging Tickets ##
@@ -27,15 +27,15 @@ When a ticket comes in it should be triaged. This means it should be classified

 Rclone uses the labels like this:

-* `bug` - a definite verified bug
+* `bug` - a definitely verified bug
 * `can't reproduce` - a problem which we can't reproduce
 * `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
 * `duplicate` - normally close these and ask the user to subscribe to the original
 * `enhancement: new remote` - a new rclone backend
 * `enhancement` - a new feature
 * `FUSE` - to do with `rclone mount` command
-* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
-* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
+* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
+* `help` wanted - mark these if you find a self-contained issue - these get shown to new visitors to the project
 * `IMPORTANT` - note to maintainers not to forget to fix this for the release
 * `maintenance` - internal enhancement, code re-organisation, etc.
 * `Needs Go 1.XX` - waiting for that version of Go to be released
@@ -51,7 +51,7 @@ The milestones have these meanings:

 * v1.XX - stuff we would like to fit into this release
 * v1.XX+1 - stuff we are leaving until the next release
-* Soon - stuff we think is a good idea - waiting to be scheduled to a release
+* Soon - stuff we think is a good idea - waiting to be scheduled for a release
 * Help wanted - blue sky stuff that might get moved up, or someone could help with
 * Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
@@ -65,7 +65,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po

 Try to process pull requests promptly!

-Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
+Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.

 After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
@@ -81,15 +81,15 @@ Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer

 High impact regressions should be fixed before the next release.

-Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.
+Near the start of the release cycle, the dependencies should be updated with `make update` to give time for bugs to surface.

 Towards the end of the release cycle try not to merge anything too big so let things settle down.

-Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
+Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time-consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.

 ## Mailing list ##

-There is now an invite only mailing list for rclone developers `rclone-dev` on google groups.
+There is now an invite-only mailing list for rclone developers `rclone-dev` on google groups.

 ## TODO ##

MANUAL.html (generated, 7476 lines changed): diff suppressed because it is too large.

MANUAL.md (generated, 9110 lines changed): diff suppressed because it is too large.

MANUAL.txt (generated, 9289 lines changed): diff suppressed because it is too large.

@@ -104,10 +104,14 @@ showupdates:
 	@echo "*** Direct dependencies that could be updated ***"
 	@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null

+# Update direct dependencies only
+updatedirect:
+	GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
+	GO111MODULE=on go mod tidy
+
 # Update direct and indirect dependencies and test dependencies
 update:
-	GO111MODULE=on go get -u -t ./...
-	#GO111MODULE=on go get -d $(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
+	GO111MODULE=on go get -d -u -t ./...
 	GO111MODULE=on go mod tidy

 # Tidy the module dependencies
@@ -256,3 +260,33 @@ startstable:

 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe

+# docker volume plugin
+PLUGIN_USER ?= rclone
+PLUGIN_TAG ?= latest
+PLUGIN_BASE_TAG ?= latest
+PLUGIN_ARCH ?= amd64
+PLUGIN_IMAGE := $(PLUGIN_USER)/docker-volume-rclone:$(PLUGIN_TAG)
+PLUGIN_BASE := $(PLUGIN_USER)/rclone:$(PLUGIN_BASE_TAG)
+PLUGIN_BUILD_DIR := ./build/docker-plugin
+PLUGIN_CONTRIB_DIR := ./contrib/docker-plugin/managed
+
+docker-plugin-create:
+	docker buildx inspect |grep -q /${PLUGIN_ARCH} || \
+	docker run --rm --privileged tonistiigi/binfmt --install all
+	rm -rf ${PLUGIN_BUILD_DIR}
+	docker buildx build \
+		--no-cache --pull \
+		--build-arg BASE_IMAGE=${PLUGIN_BASE} \
+		--platform linux/${PLUGIN_ARCH} \
+		--output ${PLUGIN_BUILD_DIR}/rootfs \
+		${PLUGIN_CONTRIB_DIR}
+	cp ${PLUGIN_CONTRIB_DIR}/config.json ${PLUGIN_BUILD_DIR}
+	docker plugin rm --force ${PLUGIN_IMAGE} 2>/dev/null || true
+	docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}
+
+docker-plugin-push:
+	docker plugin push ${PLUGIN_IMAGE}
+	docker plugin rm ${PLUGIN_IMAGE}
+
+docker-plugin: docker-plugin-create docker-plugin-push

@@ -2,7 +2,7 @@

 [Website](https://rclone.org) |
 [Documentation](https://rclone.org/docs/) |
 [Download](https://rclone.org/downloads/) |
 [Contributing](CONTRIBUTING.md) |
 [Changelog](https://rclone.org/changelog/) |
 [Installation](https://rclone.org/install/) |
@@ -10,12 +10,12 @@

 [![Build Status](https://github.com/rclone/rclone/workflows/build/badge.svg)](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
 [![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
 [![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
 [![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)

 # Rclone

-Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
+Rclone *("rsync for cloud storage")* is a command-line program to sync files and directories to and from different cloud storage providers.

 ## Storage providers
@@ -32,7 +32,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and

 * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
 * Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
 * FTP [:page_facing_up:](https://rclone.org/ftp/)
-* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
 * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
 * Google Drive [:page_facing_up:](https://rclone.org/drive/)
 * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
@@ -60,6 +59,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and

 * put.io [:page_facing_up:](https://rclone.org/putio/)
 * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
 * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
+* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
 * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
 * Seafile [:page_facing_up:](https://rclone.org/seafile/)
 * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
@@ -73,7 +73,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and

 * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
 * Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
 * The local filesystem [:page_facing_up:](https://rclone.org/local/)

 Please see [the full list of all storage providers and their features](https://rclone.org/overview/)

 ## Features

@@ -34,13 +34,24 @@ This file describes how to make the various kinds of releases

   * make startdev # make startstable for stable branch
   * # announce with forum post, twitter post, patreon post

+## Update dependencies
+
 Early in the next release cycle update the dependencies

   * Review any pinned packages in go.mod and remove if possible
-  * make update
-  * git status
-  * git add new files
+  * make updatedirect
+  * make
   * git commit -a -v
+  * make update
+  * make
+  * roll back any updates which didn't compile
+  * git commit -a -v --amend
+
+Note that `make update` updates all direct and indirect dependencies
+and there can occasionally be forwards compatibility problems with
+doing that so it may be necessary to roll back dependencies to the
+version specified by `make updatedirect` in order to get rclone to
+build.

 ## Making a point release

@@ -1 +1 @@
-v1.56.0
+v1.58.0

@@ -20,7 +20,7 @@ func init() {
 		NewFs: NewFs,
 		Options: []fs.Option{{
 			Name:     "remote",
-			Help:     "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
+			Help:     "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
 			Required: true,
 		}},
 	}

@@ -18,6 +18,7 @@ import (
 	_ "github.com/rclone/rclone/backend/ftp"
 	_ "github.com/rclone/rclone/backend/googlecloudstorage"
 	_ "github.com/rclone/rclone/backend/googlephotos"
+	_ "github.com/rclone/rclone/backend/hasher"
 	_ "github.com/rclone/rclone/backend/hdfs"
 	_ "github.com/rclone/rclone/backend/http"
 	_ "github.com/rclone/rclone/backend/hubic"
@@ -37,6 +38,7 @@ import (
 	_ "github.com/rclone/rclone/backend/seafile"
 	_ "github.com/rclone/rclone/backend/sftp"
 	_ "github.com/rclone/rclone/backend/sharefile"
+	_ "github.com/rclone/rclone/backend/sia"
 	_ "github.com/rclone/rclone/backend/sugarsync"
 	_ "github.com/rclone/rclone/backend/swift"
 	_ "github.com/rclone/rclone/backend/tardigrade"

@@ -14,6 +14,7 @@ we ignore assets completely!
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -22,7 +23,6 @@ import (
 	"time"

 	acd "github.com/ncw/go-acd"
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -259,7 +259,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to configure Amazon Drive")
+		return nil, fmt.Errorf("failed to configure Amazon Drive: %w", err)
 	}

 	c := acd.NewClient(oAuthClient)
@@ -292,13 +292,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to get endpoints")
+		return nil, fmt.Errorf("failed to get endpoints: %w", err)
 	}

 	// Get rootID
 	rootInfo, err := f.getRootInfo(ctx)
 	if err != nil || rootInfo.Id == nil {
-		return nil, errors.Wrap(err, "failed to get root")
+		return nil, fmt.Errorf("failed to get root: %w", err)
 	}
 	f.trueRootID = *rootInfo.Id

View File

@@ -1,5 +1,6 @@
 // Test AmazonCloudDrive filesystem interface
+//go:build acd
 // +build acd

 package amazonclouddrive_test
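
The build-tag hunks in this set all follow the gofmt convention introduced with Go 1.17: a new `//go:build` line is added directly above the old `// +build` line, and the two must express the same constraint (the `go1.14` tag is dropped throughout because the minimum Go version was raised). A small illustration of the dual form, as a complete compilable file:

```go
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

// Package example builds everywhere except plan9, solaris and js;
// older toolchains read the second line, newer ones the first.
package example
```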


@@ -1,6 +1,7 @@
 // Package azureblob provides an interface to the Microsoft Azure blob object storage system
-// +build !plan9,!solaris,!js,go1.14
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js

 package azureblob
@@ -9,12 +10,14 @@ import (
 	"encoding/base64"
 	"encoding/hex"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"net/http"
 	"net/url"
 	"path"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -22,7 +25,6 @@ import (
 	"github.com/Azure/azure-pipeline-go/pipeline"
 	"github.com/Azure/azure-storage-blob-go/azblob"
 	"github.com/Azure/go-autorest/autorest/adal"
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -73,30 +75,29 @@ func init() {
 		NewFs: NewFs,
 		Options: []fs.Option{{
 			Name: "account",
-			Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
+			Help: "Storage Account Name.\n\nLeave blank to use SAS URL or Emulator.",
 		}, {
 			Name: "service_principal_file",
 			Help: `Path to file containing credentials for use with a service principal.

Leave blank normally. Needed only if you want to use a service principal instead of interactive login.

-    $ az sp create-for-rbac --name "<name>" \
+    $ az ad sp create-for-rbac --name "<name>" \
       --role "Storage Blob Data Owner" \
       --scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
       > azure-principal.json

-See [Use Azure CLI to assign an Azure role for access to blob and queue data](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli)
-for more details.
+See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.
 `,
 		}, {
 			Name: "key",
-			Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
+			Help: "Storage Account Key.\n\nLeave blank to use SAS URL or Emulator.",
 		}, {
 			Name: "sas_url",
-			Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
+			Help: "SAS URL for container level access only.\n\nLeave blank if using account/key or Emulator.",
 		}, {
 			Name: "use_msi",
-			Help: `Use a managed service identity to authenticate (only works in Azure)
+			Help: `Use a managed service identity to authenticate (only works in Azure).

When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
to authenticate to Azure Storage instead of a SAS token or account key.
@@ -109,27 +110,27 @@ msi_client_id, or msi_mi_res_id parameters.`,
 			Default: false,
 		}, {
 			Name:     "msi_object_id",
-			Help:     "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.",
+			Help:     "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
 			Advanced: true,
 		}, {
 			Name:     "msi_client_id",
-			Help:     "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.",
+			Help:     "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
 			Advanced: true,
 		}, {
 			Name:     "msi_mi_res_id",
-			Help:     "Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.",
+			Help:     "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
 			Advanced: true,
 		}, {
 			Name:    "use_emulator",
-			Help:    "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
+			Help:    "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
 			Default: false,
 		}, {
 			Name:     "endpoint",
-			Help:     "Endpoint for the service\nLeave blank normally.",
+			Help:     "Endpoint for the service.\n\nLeave blank normally.",
 			Advanced: true,
 		}, {
 			Name:     "upload_cutoff",
-			Help:     "Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated)",
+			Help:     "Cutoff for switching to chunked upload (<= 256 MiB) (deprecated).",
 			Advanced: true,
 		}, {
 			Name: "chunk_size",
@@ -200,6 +201,7 @@ to start uploading.`,
 			Default:  memoryPoolFlushTime,
 			Advanced: true,
 			Help: `How often internal memory buffer pools will be flushed.
+
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
 		}, {
@@ -219,12 +221,12 @@ This option controls how often unused buffers will be removed from the pool.`,
 				encoder.EncodeRightPeriod),
 		}, {
 			Name:    "public_access",
-			Help:    "Public access level of a container: blob, container.",
+			Help:    "Public access level of a container: blob or container.",
 			Default: string(azblob.PublicAccessNone),
 			Examples: []fs.OptionExample{
 				{
 					Value: string(azblob.PublicAccessNone),
-					Help:  "The container and its blobs can be accessed only with an authorized request. It's a default value",
+					Help:  "The container and its blobs can be accessed only with an authorized request.\nIt's a default value.",
 				}, {
 					Value: string(azblob.PublicAccessBlob),
 					Help:  "Blob data within this container can be read via anonymous request.",
@@ -234,6 +236,11 @@ This option controls how often unused buffers will be removed from the pool.`,
 				},
 			},
 			Advanced: true,
+		}, {
+			Name:     "no_head_object",
+			Help:     `If set, do not do HEAD before GET when getting objects.`,
+			Default:  false,
+			Advanced: true,
 		}},
 	})
 }
@@ -259,6 +266,7 @@ type Options struct {
 	MemoryPoolUseMmap bool                 `config:"memory_pool_use_mmap"`
 	Enc               encoder.MultiEncoder `config:"encoding"`
 	PublicAccess      string               `config:"public_access"`
+	NoHeadObject      bool                 `config:"no_head_object"`
 }

 // Fs represents a remote azure server
@@ -406,10 +414,10 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
 func checkUploadChunkSize(cs fs.SizeSuffix) error {
 	const minChunkSize = fs.SizeSuffixBase
 	if cs < minChunkSize {
-		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
 	}
 	if cs > maxChunkSize {
-		return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
+		return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
 	}
 	return nil
 }
@@ -451,11 +459,11 @@ const azureStorageEndpoint = "https://storage.azure.com/"
 func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []byte) (azblob.TokenRefresher, error) {
 	var spCredentials servicePrincipalCredentials
 	if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
-		return nil, errors.Wrap(err, "error parsing credentials from JSON file")
+		return nil, fmt.Errorf("error parsing credentials from JSON file: %w", err)
 	}
 	oauthConfig, err := adal.NewOAuthConfig(azureActiveDirectoryEndpoint, spCredentials.Tenant)
 	if err != nil {
-		return nil, errors.Wrap(err, "error creating oauth config")
+		return nil, fmt.Errorf("error creating oauth config: %w", err)
 	}

 	// Create service principal token for Azure Storage.
@@ -465,7 +473,7 @@ func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []by
 		spCredentials.Password,
 		azureStorageEndpoint)
 	if err != nil {
-		return nil, errors.Wrap(err, "error creating service principal token")
+		return nil, fmt.Errorf("error creating service principal token: %w", err)
 	}

 	// Wrap token inside a refresher closure.
@@ -518,10 +526,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	err = checkUploadChunkSize(opt.ChunkSize)
 	if err != nil {
-		return nil, errors.Wrap(err, "azure: chunk size")
+		return nil, fmt.Errorf("azure: chunk size: %w", err)
 	}
 	if opt.ListChunkSize > maxListChunkSize {
-		return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
+		return nil, fmt.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
 	}
 	if opt.Endpoint == "" {
 		opt.Endpoint = storageDefaultBaseURL
@@ -530,12 +538,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.AccessTier == "" {
 		opt.AccessTier = string(defaultAccessTier)
 	} else if !validateAccessTier(opt.AccessTier) {
-		return nil, errors.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
+		return nil, fmt.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
 			string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
 	}

 	if !validatePublicAccess((opt.PublicAccess)) {
-		return nil, errors.Errorf("Azure Blob: Supported public access level are %s and %s",
+		return nil, fmt.Errorf("Azure Blob: Supported public access level are %s and %s",
 			string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
 	}
@@ -577,11 +585,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	case opt.UseEmulator:
 		credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
 		if err != nil {
-			return nil, errors.Wrapf(err, "Failed to parse credentials")
+			return nil, fmt.Errorf("Failed to parse credentials: %w", err)
 		}
 		u, err = url.Parse(emulatorBlobEndpoint)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+			return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
 		}
 		pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
 		serviceURL = azblob.NewServiceURL(*u, pipeline)
@@ -623,12 +631,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		})
 		if err != nil {
-			return nil, errors.Wrapf(err, "Failed to acquire MSI token")
+			return nil, fmt.Errorf("Failed to acquire MSI token: %w", err)
 		}
 		u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+			return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
 		}
 		credential := azblob.NewTokenCredential(token.AccessToken, func(credential azblob.TokenCredential) time.Duration {
 			fs.Debugf(f, "Token refresher called.")
@@ -658,19 +666,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	case opt.Account != "" && opt.Key != "":
 		credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
 		if err != nil {
-			return nil, errors.Wrapf(err, "Failed to parse credentials")
+			return nil, fmt.Errorf("Failed to parse credentials: %w", err)
 		}
 		u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+			return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
 		}
 		pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
 		serviceURL = azblob.NewServiceURL(*u, pipeline)
 	case opt.SASURL != "":
 		u, err = url.Parse(opt.SASURL)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to parse SAS URL")
+			return nil, fmt.Errorf("failed to parse SAS URL: %w", err)
 		}
 		// use anonymous credentials in case of sas url
 		pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
@@ -690,17 +698,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		// Create a standard URL.
 		u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+			return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
 		}
 		// Try loading service principal credentials from file.
 		loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
 		if err != nil {
-			return nil, errors.Wrap(err, "error opening service principal credentials file")
+			return nil, fmt.Errorf("error opening service principal credentials file: %w", err)
 		}
 		// Create a token refresher from service principal credentials.
 		tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, loadedCreds)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to create a service principal token")
+			return nil, fmt.Errorf("failed to create a service principal token: %w", err)
 		}
 		options := azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}
 		pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options)
@@ -757,7 +765,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItemInternal) (fs
 		if err != nil {
 			return nil, err
 		}
-	} else {
+	} else if !o.fs.opt.NoHeadObject {
 		err := o.readMetaData() // reads info and headers, returning an error
 		if err != nil {
 			return nil, err
@@ -1316,7 +1324,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	}
 	data, err := base64.StdEncoding.DecodeString(o.md5)
 	if err != nil {
-		return "", errors.Wrapf(err, "Failed to decode Content-MD5: %q", o.md5)
+		return "", fmt.Errorf("Failed to decode Content-MD5: %q: %w", o.md5, err)
 	}
 	return hex.EncodeToString(data), nil
 }
@@ -1367,6 +1375,39 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
 	return nil
 }

+func (o *Object) decodeMetaDataFromDownloadResponse(info *azblob.DownloadResponse) (err error) {
+	metadata := info.NewMetadata()
+	size := info.ContentLength()
+	if isDirectoryMarker(size, metadata, o.remote) {
+		return fs.ErrorNotAFile
+	}
+	// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
+	// this as base64 encoded string.
+	o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
+	o.mimeType = info.ContentType()
+	o.size = size
+	o.modTime = info.LastModified()
+	o.accessTier = o.AccessTier()
+	o.setMetadata(metadata)
+	// If it was a Range request, the size is wrong, so correct it
+	if contentRange := info.ContentRange(); contentRange != "" {
+		slash := strings.IndexRune(contentRange, '/')
+		if slash >= 0 {
+			i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
+			if err == nil {
+				o.size = i
+			} else {
+				fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
+			}
+		} else {
+			fs.Debugf(o, "Failed to find length in %q", contentRange)
+		}
+	}
+	return nil
+}
+
 func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItemInternal) (err error) {
 	metadata := info.Metadata
 	size := *info.Properties.ContentLength
@@ -1469,7 +1510,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	var offset int64
 	var count int64
 	if o.AccessTier() == azblob.AccessTierArchive {
-		return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
+		return nil, fmt.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
 	}
 	fs.FixRangeOption(options, o.size)
 	for _, option := range options {
@@ -1495,7 +1536,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		return o.fs.shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to open for download")
+		return nil, fmt.Errorf("failed to open for download: %w", err)
+	}
+	err = o.decodeMetaDataFromDownloadResponse(downloadResponse)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode metadata for download: %w", err)
 	}
 	in = downloadResponse.Body(azblob.RetryReaderOptions{})
 	return in, nil
@@ -1585,7 +1630,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			fs.Debugf(o, "deleting archive tier blob before updating")
 			err = o.Remove(ctx)
 			if err != nil {
-				return errors.Wrap(err, "failed to delete archive blob before updating")
+				return fmt.Errorf("failed to delete archive blob before updating: %w", err)
 			}
 		} else {
 			return errCantUpdateArchiveTierBlobs
@@ -1678,7 +1723,7 @@ func (o *Object) AccessTier() azblob.AccessTierType {
 // SetTier performs changing object tier
 func (o *Object) SetTier(tier string) error {
 	if !validateAccessTier(tier) {
-		return errors.Errorf("Tier %s not supported by Azure Blob Storage", tier)
+		return fmt.Errorf("Tier %s not supported by Azure Blob Storage", tier)
 	}
 	// Check if current tier already matches with desired tier
@@ -1694,7 +1739,7 @@ func (o *Object) SetTier(tier string) error {
 	})
 	if err != nil {
-		return errors.Wrap(err, "Failed to set Blob Tier")
+		return fmt.Errorf("Failed to set Blob Tier: %w", err)
 	}
 	// Set access tier on local object also, this typically

@@ -1,4 +1,5 @@
-// +build !plan9,!solaris,!js,go1.14
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js

 package azureblob


@@ -1,6 +1,7 @@
 // Test AzureBlob filesystem interface
-// +build !plan9,!solaris,!js,go1.14
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js

 package azureblob


@@ -1,6 +1,7 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
-// +build plan9 solaris js !go1.14
+//go:build plan9 || solaris || js
+// +build plan9 solaris js

 package azureblob


@@ -1,4 +1,5 @@
-// +build !plan9,!solaris,!js,go1.14
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js

 package azureblob
@@ -12,7 +13,6 @@ import (
 	"net/http"

 	"github.com/Azure/go-autorest/autorest/adal"
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/fshttp"
 )
@@ -94,7 +94,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
 	httpClient := fshttp.NewClient(ctx)
 	resp, err := httpClient.Do(req)
 	if err != nil {
-		return result, errors.Wrap(err, "MSI is not enabled on this VM")
+		return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
 	}
 	defer func() { // resp and Body should not be nil
 		_, err = io.Copy(ioutil.Discard, resp.Body)
@@ -119,7 +119,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
 	b, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
-		return result, errors.Wrap(err, "Couldn't read IMDS response")
+		return result, fmt.Errorf("Couldn't read IMDS response: %w", err)
 	}
 	// Remove BOM, if any. azcopy does this so I'm following along.
 	b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
@@ -130,7 +130,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
 	// storage API call.
 	err = json.Unmarshal(b, &result)
 	if err != nil {
-		return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
+		return result, fmt.Errorf("Couldn't unmarshal IMDS response: %w", err)
 	}

 	return result, nil
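
The IMDS code above trims a UTF-8 byte order mark before unmarshalling because `encoding/json` rejects a payload that starts with one. A minimal reproduction of the trick (the response body here is made up):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// A JSON body prefixed with a UTF-8 BOM, as some IMDS-style
	// endpoints return it.
	body := []byte("\xef\xbb\xbf{\"access_token\":\"abc\"}")

	// json.Unmarshal would fail on the raw bytes, so strip the BOM
	// first - the same bytes.TrimPrefix call the hunk uses.
	body = bytes.TrimPrefix(body, []byte("\xef\xbb\xbf"))

	var result struct {
		AccessToken string `json:"access_token"`
	}
	if err := json.Unmarshal(body, &result); err != nil {
		fmt.Println("Couldn't unmarshal IMDS response:", err)
		return
	}
	fmt.Println(result.AccessToken) // abc
}
```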


@@ -1,4 +1,5 @@
-// +build !plan9,!solaris,!js,go1.14
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js

 package azureblob


@@ -9,6 +9,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/sha1"
+	"errors"
 	"fmt"
 	gohash "hash"
 	"io"
@@ -19,7 +20,6 @@ import (
 	"sync"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/b2/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
@@ -75,15 +75,15 @@ func init() {
 		NewFs: NewFs,
 		Options: []fs.Option{{
 			Name:     "account",
-			Help:     "Account ID or Application Key ID",
+			Help:     "Account ID or Application Key ID.",
 			Required: true,
 		}, {
 			Name:     "key",
-			Help:     "Application Key",
+			Help:     "Application Key.",
 			Required: true,
 		}, {
 			Name:     "endpoint",
-			Help:     "Endpoint for the service.\nLeave blank normally.",
+			Help:     "Endpoint for the service.\n\nLeave blank normally.",
 			Advanced: true,
 		}, {
 			Name: "test_mode",
@@ -103,7 +103,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
 			Advanced: true,
 		}, {
 			Name:     "versions",
-			Help:     "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
+			Help:     "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
 			Default:  false,
 			Advanced: true,
 		}, {
@@ -121,7 +121,7 @@ This value should be set no larger than 4.657 GiB (== 5 GB).`,
 			Advanced: true,
 		}, {
 			Name: "copy_cutoff",
-			Help: `Cutoff for switching to multipart copy
+			Help: `Cutoff for switching to multipart copy.

Any files larger than this that need to be server-side copied will be
copied in chunks of this size.
@@ -131,17 +131,19 @@ The minimum is 0 and the maximum is 4.6 GiB.`,
 			Advanced: true,
 		}, {
 			Name: "chunk_size",
-			Help: `Upload chunk size. Must fit in memory.
+			Help: `Upload chunk size.

-When uploading large files, chunk the file into this size. Note that
-these chunks are buffered in memory and there might a maximum of
-"--transfers" chunks in progress at once. 5,000,000 Bytes is the
-minimum size.`,
+When uploading large files, chunk the file into this size.
+
+Must fit in memory. These chunks are buffered in memory and there
+might a maximum of "--transfers" chunks in progress at once.
+
+5,000,000 Bytes is the minimum size.`,
 			Default:  defaultChunkSize,
 			Advanced: true,
 		}, {
 			Name: "disable_checksum",
-			Help: `Disable checksums for large (> upload cutoff) files
+			Help: `Disable checksums for large (> upload cutoff) files.

Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
@@ -364,7 +366,7 @@ func errorHandler(resp *http.Response) error {
 func checkUploadChunkSize(cs fs.SizeSuffix) error {
 	if cs < minChunkSize {
-		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
 	}
 	return nil
 }
@@ -379,7 +381,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
 func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
 	if cs < opt.ChunkSize {
-		return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
+		return fmt.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
 	}
 	return nil
 }
@@ -412,11 +414,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	err = checkUploadCutoff(opt, opt.UploadCutoff)
 	if err != nil {
-		return nil, errors.Wrap(err, "b2: upload cutoff")
+		return nil, fmt.Errorf("b2: upload cutoff: %w", err)
 	}
 	err = checkUploadChunkSize(opt.ChunkSize)
 	if err != nil {
-		return nil, errors.Wrap(err, "b2: chunk size")
+		return nil, fmt.Errorf("b2: chunk size: %w", err)
 	}
 	if opt.Account == "" {
 		return nil, errors.New("account not found")
@@ -461,7 +463,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	err = f.authorizeAccount(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to authorize account")
+		return nil, fmt.Errorf("failed to authorize account: %w", err)
 	}
 	// If this is a key limited to a single bucket, it must exist already
 	if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
@@ -470,7 +472,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			return nil, errors.New("bucket that application key is restricted to no longer exists")
 		}
 		if allowedBucket != f.rootBucket {
-			return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
+			return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
 		}
 		f.cache.MarkOK(f.rootBucket)
 		f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
@@ -510,7 +512,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
 		return f.shouldRetryNoReauth(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "failed to authenticate")
+		return fmt.Errorf("failed to authenticate: %w", err)
 	}
 	f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
 	return nil
@@ -556,7 +558,7 @@ func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUp
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to get upload URL")
+		return nil, fmt.Errorf("failed to get upload URL: %w", err)
 	}
 	return upload, nil
 }
@@ -1046,7 +1048,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
 				}
 			}
 		}
-		return errors.Wrap(err, "failed to create bucket")
+		return fmt.Errorf("failed to create bucket: %w", err)
 	}
 	f.setBucketID(bucket, response.ID)
 	f.setBucketType(bucket, response.Type)
@@ -1081,7 +1083,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "failed to delete bucket")
+		return fmt.Errorf("failed to delete bucket: %w", err)
 	}
 	f.clearBucketID(bucket)
 	f.clearBucketType(bucket)
@@ -1122,7 +1124,7 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
 				return nil
 			}
 		}
-		return errors.Wrapf(err, "failed to hide %q", bucketPath)
+		return fmt.Errorf("failed to hide %q: %w", bucketPath, err)
 	}
 	return nil
 }
@@ -1143,7 +1145,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrapf(err, "failed to delete %q", Name)
+		return fmt.Errorf("failed to delete %q: %w", Name, err)
 	}
 	return nil
 }
@@ -1362,7 +1364,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return "", errors.Wrap(err, "failed to get download authorization")
+		return "", fmt.Errorf("failed to get download authorization: %w", err)
 	}
 	return response.AuthorizationToken, nil
 }
@@ -1667,14 +1669,14 @@ func (file *openFile) Close() (err error) {
 	// Check to see we read the correct number of bytes
 	if file.o.Size() != file.bytes {
-		return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
+		return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
 	}

 	// Check the SHA1
 	receivedSHA1 := file.o.sha1
 	calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
 	if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
-		return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
+		return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
 	}

 	return nil
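
The `openFile.Close` checks in the hunk above verify a download end-to-end: the byte count against the expected size, and a SHA1 computed while reading against the checksum the server sent. A self-contained sketch of the same hash-while-reading pattern:

```go
package main

import (
	"crypto/sha1"
	"fmt"
	"io"
	"strings"
)

// verifyDownload consumes r while hashing it, then checks the byte
// count and SHA1 the way the b2 openFile.Close above does.
func verifyDownload(r io.Reader, wantSize int64, wantSHA1 string) error {
	h := sha1.New()
	n, err := io.Copy(h, r)
	if err != nil {
		return err
	}
	if n != wantSize {
		return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", wantSize, n)
	}
	got := fmt.Sprintf("%x", h.Sum(nil))
	if wantSHA1 != "" && got != wantSHA1 {
		return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", wantSHA1, got)
	}
	return nil
}

func main() {
	body := "hello"
	want := "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d" // SHA1("hello")
	fmt.Println(verifyDownload(strings.NewReader(body), 5, want)) // <nil>
}
```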
@@ -1714,7 +1716,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
 		if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
 			return nil, nil, fs.ErrorObjectNotFound
 		}
-		return nil, nil, errors.Wrapf(err, "failed to %s for download", method)
+		return nil, nil, fmt.Errorf("failed to %s for download: %w", method, err)
 	}

 	// NB resp may be Open here - don't return err != nil without closing


@@ -15,7 +15,6 @@ import (
 	"strings"
 	"sync"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/b2/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
@@ -102,7 +101,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 			parts++
 		}
 		if parts > maxParts {
-			return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
+			return nil, fmt.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
 		}
 		sha1SliceSize = parts
 	}
@@ -185,7 +184,7 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
 			return up.f.shouldRetry(ctx, resp, err)
 		})
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to get upload URL")
+			return nil, fmt.Errorf("failed to get upload URL: %w", err)
 		}
 	} else {
 		upload, up.uploads = up.uploads[0], up.uploads[1:]
@@ -406,7 +405,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
 		up.size += int64(n)
 		if part > maxParts {
 			up.f.putBuf(buf, false)
-			return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
+			return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
 		}
 		part := part // for the closure


@@ -61,7 +61,7 @@ func (e *Error) Error() string {
 var _ error = (*Error)(nil)

 // ItemFields are the fields needed for FileInfo
-var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
+var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"

 // Types of things in Item
 const (
@@ -90,6 +90,12 @@ type Item struct {
 		URL    string `json:"url,omitempty"`
 		Access string `json:"access,omitempty"`
 	} `json:"shared_link"`
+	OwnedBy struct {
+		Type  string `json:"type"`
+		ID    string `json:"id"`
+		Name  string `json:"name"`
+		Login string `json:"login"`
+	} `json:"owned_by"`
 }

 // ModTime returns the modification time of the item
@@ -103,10 +109,11 @@ func (i *Item) ModTime() (t time.Time) {
 // FolderItems is returned from the GetFolderItems call
 type FolderItems struct {
 	TotalCount int     `json:"total_count"`
 	Entries    []Item  `json:"entries"`
 	Offset     int     `json:"offset"`
 	Limit      int     `json:"limit"`
+	NextMarker *string `json:"next_marker,omitempty"`
 	Order      []struct {
 		By        string `json:"by"`
 		Direction string `json:"direction"`
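
`NextMarker` is declared as a `*string` rather than a plain string so that an absent `next_marker` field decodes to nil, which is what tells the caller the listing is complete. A small sketch of that nil-versus-present distinction (the JSON bodies are invented):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type folderItems struct {
	NextMarker *string `json:"next_marker,omitempty"`
}

func main() {
	var withMarker, lastPage folderItems
	_ = json.Unmarshal([]byte(`{"next_marker":"abc123"}`), &withMarker)
	_ = json.Unmarshal([]byte(`{}`), &lastPage)

	fmt.Println(withMarker.NextMarker != nil) // true - fetch another page
	fmt.Println(lastPage.NextMarker == nil)   // true - listing finished
}
```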


@@ -14,6 +14,7 @@ import (
 	"crypto/rsa"
 	"encoding/json"
 	"encoding/pem"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -22,15 +23,10 @@ import (
 	"path"
 	"strconv"
 	"strings"
+	"sync"
+	"sync/atomic"
 	"time"

-	"github.com/rclone/rclone/lib/encoder"
-	"github.com/rclone/rclone/lib/env"
-	"github.com/rclone/rclone/lib/jwtutil"
-	"github.com/youmark/pkcs8"
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/box/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -41,9 +37,13 @@ import (
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/dircache"
+	"github.com/rclone/rclone/lib/encoder"
+	"github.com/rclone/rclone/lib/env"
+	"github.com/rclone/rclone/lib/jwtutil"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
+	"github.com/youmark/pkcs8"
 	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/jws"
 )
@@ -56,7 +56,6 @@ const (
 	decayConstant = 2 // bigger for slower decay, exponential
 	rootURL       = "https://api.box.com/2.0"
 	uploadURL     = "https://upload.box.com/api/2.0"
-	listChunks          = 1000     // chunk size to read directory listings
 	minUploadCutoff     = 50000000 // upload cutoff can be no lower than this
 	defaultUploadCutoff = 50 * 1024 * 1024
 	tokenURL            = "https://api.box.com/oauth2/token"
@@ -92,7 +91,7 @@ func init() {
 			if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
 				err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
 				if err != nil {
-					return nil, errors.Wrap(err, "failed to configure token with jwt authentication")
+					return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
 				}
 				// Else, if not using an access token, use oauth2
 			} else if boxAccessToken == "" || !boxAccessTokenOk {
@@ -109,19 +108,19 @@ func init() {
 			Advanced: true,
 		}, {
 			Name: "box_config_file",
-			Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
+			Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
 		}, {
 			Name: "access_token",
-			Help: "Box App Primary Access Token\nLeave blank normally.",
+			Help: "Box App Primary Access Token\n\nLeave blank normally.",
 		}, {
 			Name:    "box_sub_type",
 			Default: "user",
 			Examples: []fs.OptionExample{{
 				Value: "user",
-				Help:  "Rclone should act on behalf of a user",
+				Help:  "Rclone should act on behalf of a user.",
 			}, {
 				Value: "enterprise",
-				Help:  "Rclone should act on behalf of a service account",
+				Help:  "Rclone should act on behalf of a service account.",
 			}},
 		}, {
 			Name: "upload_cutoff",
@@ -133,6 +132,16 @@ func init() {
 			Help:     "Max number of times to try committing a multipart file.",
 			Default:  100,
 			Advanced: true,
+		}, {
+			Name:     "list_chunk",
+			Default:  1000,
+			Help:     "Size of listing chunk 1-1000.",
+			Advanced: true,
+		}, {
+			Name:     "owned_by",
+			Default:  "",
+			Help:     "Only show items owned by the login (email address) passed in.",
+			Advanced: true,
 		}, {
 			Name: config.ConfigEncoding,
 			Help: config.ConfigEncodingHelp,
@@ -156,15 +165,15 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
 	jsonFile = env.ShellExpand(jsonFile)
 	boxConfig, err := getBoxConfig(jsonFile)
 	if err != nil {
-		return errors.Wrap(err, "get box config")
+		return fmt.Errorf("get box config: %w", err)
 	}
 	privateKey, err := getDecryptedPrivateKey(boxConfig)
 	if err != nil {
-		return errors.Wrap(err, "get decrypted private key")
+		return fmt.Errorf("get decrypted private key: %w", err)
 	}
 	claims, err := getClaims(boxConfig, boxSubType)
 	if err != nil {
-		return errors.Wrap(err, "get claims")
+		return fmt.Errorf("get claims: %w", err)
 	}
 	signingHeaders := getSigningHeaders(boxConfig)
 	queryParams := getQueryParams(boxConfig)
@@ -176,11 +185,11 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
 func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
 	file, err := ioutil.ReadFile(configFile)
 	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to read Box config")
+		return nil, fmt.Errorf("box: failed to read Box config: %w", err)
 	}
 	err = json.Unmarshal(file, &boxConfig)
 	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to parse Box config")
+		return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
 	}
 	return boxConfig, nil
 }
@@ -188,7 +197,7 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
 func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
 	val, err := jwtutil.RandomHex(20)
 	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to generate random string for jti")
+		return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
 	}

 	claims = &jws.ClaimSet{
@@ -229,12 +238,12 @@ func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err

 	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
 	if len(rest) > 0 {
-		return nil, errors.Wrap(err, "box: extra data included in private key")
+		return nil, fmt.Errorf("box: extra data included in private key: %w", err)
 	}

 	rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
 	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to decrypt private key")
+		return nil, fmt.Errorf("box: failed to decrypt private key: %w", err)
 	}

 	return rsaKey.(*rsa.PrivateKey), nil
@@ -247,6 +256,8 @@ type Options struct {
 	Enc          encoder.MultiEncoder `config:"encoding"`
 	RootFolderID string               `config:"root_folder_id"`
 	AccessToken  string               `config:"access_token"`
+	ListChunk    int                  `config:"list_chunk"`
+	OwnedBy      string               `config:"owned_by"`
 }

 // Fs represents a remote box
@@ -326,6 +337,13 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 		authRetry = true
 		fs.Debugf(nil, "Should retry: %v", err)
 	}
+
+	// Box API errors which should be retries
+	if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "operation_blocked_temporary" {
+		fs.Debugf(nil, "Retrying API error %v", err)
+		return true, err
+	}
+
 	return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
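
The retry addition above works by asserting the error to the backend's own `*api.Error` type and matching its Code field. A generic sketch of the pattern with a stand-in error type (the names are illustrative; the hunk uses a direct type assertion, while `errors.As` shown here also matches wrapped errors):

```go
package main

import (
	"errors"
	"fmt"
)

// apiError stands in for a backend's typed API error.
type apiError struct{ Code string }

func (e *apiError) Error() string { return "api error: " + e.Code }

// shouldRetry retries only the documented transient error code.
func shouldRetry(err error) bool {
	var apiErr *apiError
	return errors.As(err, &apiErr) && apiErr.Code == "operation_blocked_temporary"
}

func main() {
	fmt.Println(shouldRetry(&apiError{Code: "operation_blocked_temporary"})) // true
	fmt.Println(shouldRetry(errors.New("permanent failure")))                // false
}
```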
@@ -340,7 +358,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
 		return nil, err
 	}

-	found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
+	found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
 		if strings.EqualFold(item.Name, leaf) {
 			info = item
 			return true
@@ -383,7 +401,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}

 	if opt.UploadCutoff < minUploadCutoff {
-		return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
+		return nil, fmt.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
 	}

 	root = parsePath(root)
@@ -394,7 +412,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.AccessToken == "" {
 		client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to configure Box")
+			return nil, fmt.Errorf("failed to configure Box: %w", err)
 		}
 	}
@@ -515,7 +533,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 	// Find the leaf in pathID
-	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
+	found, err = f.listAll(ctx, pathID, true, false, true, func(item *api.Item) bool {
 		if strings.EqualFold(item.Name, leaf) {
 			pathIDOut = item.ID
 			return true
@@ -571,17 +589,20 @@ type listAllFn func(*api.Item) bool
 // Lists the directory required calling the user function on each item found
 //
 // If the user fn ever returns true then it early exits with found = true
-func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
+func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, activeOnly bool, fn listAllFn) (found bool, err error) {
 	opts := rest.Opts{
 		Method:     "GET",
 		Path:       "/folders/" + dirID + "/items",
 		Parameters: fieldsValue(),
 	}
-	opts.Parameters.Set("limit", strconv.Itoa(listChunks))
-	offset := 0
+	opts.Parameters.Set("limit", strconv.Itoa(f.opt.ListChunk))
+	opts.Parameters.Set("usemarker", "true")
+	var marker *string
 OUTER:
 	for {
-		opts.Parameters.Set("offset", strconv.Itoa(offset))
+		if marker != nil {
+			opts.Parameters.Set("marker", *marker)
+		}

 		var result api.FolderItems
 		var resp *http.Response
@@ -590,7 +611,7 @@ OUTER:
 			return shouldRetry(ctx, resp, err)
 		})
 		if err != nil {
-			return found, errors.Wrap(err, "couldn't list files")
+			return found, fmt.Errorf("couldn't list files: %w", err)
 		}
 		for i := range result.Entries {
 			item := &result.Entries[i]
@@ -606,7 +627,10 @@ OUTER:
 				fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
 				continue
 			}
-			if item.ItemStatus != api.ItemStatusActive {
+			if activeOnly && item.ItemStatus != api.ItemStatusActive {
+				continue
+			}
+			if f.opt.OwnedBy != "" && f.opt.OwnedBy != item.OwnedBy.Login {
 				continue
 			}
 			item.Name = f.opt.Enc.ToStandardName(item.Name)
@@ -615,8 +639,8 @@ OUTER:
 				break OUTER
 			}
 		}
-		offset += result.Limit
-		if offset >= result.TotalCount {
+		marker = result.NextMarker
+		if marker == nil {
 			break
 		}
 	}
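
The switch from offset counting to marker following is the core of this listAll rewrite: instead of advancing `offset` by `result.Limit` until it passes TotalCount, each response hands back an opaque `next_marker` that is echoed into the next request until it comes back nil. A minimal loop over a fake paged API to show the shape of it (the fetch function is invented):

```go
package main

import "fmt"

type page struct {
	Entries    []string
	NextMarker *string // nil means no more pages
}

// fetch simulates one marker-paginated API call.
func fetch(marker *string) page {
	if marker == nil {
		m := "m1"
		return page{Entries: []string{"a", "b"}, NextMarker: &m}
	}
	return page{Entries: []string{"c"}, NextMarker: nil}
}

func main() {
	var marker *string
	for {
		result := fetch(marker)
		for _, e := range result.Entries {
			fmt.Println(e)
		}
		marker = result.NextMarker
		if marker == nil {
			break
		}
	}
}
```

Marker pagination also stays correct when items are created or deleted between pages, which offset pagination does not.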
@@ -638,7 +662,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err return nil, err
} }
var iErr error var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool { _, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
remote := path.Join(dir, info.Name) remote := path.Join(dir, info.Name)
if info.Type == api.ItemTypeFolder { if info.Type == api.ItemTypeFolder {
// cache the directory ID for later lookups // cache the directory ID for later lookups
@@ -714,14 +738,14 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
var conflict api.PreUploadCheckConflict var conflict api.PreUploadCheckConflict
err = json.Unmarshal(apiErr.ContextInfo, &conflict) err = json.Unmarshal(apiErr.ContextInfo, &conflict)
if err != nil { if err != nil {
return "", errors.Wrap(err, "pre-upload check: JSON decode failed") return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
} }
if conflict.Conflicts.Type != api.ItemTypeFile { if conflict.Conflicts.Type != api.ItemTypeFile {
return "", errors.Wrap(err, "pre-upload check: can't overwrite non file with file") return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
} }
return conflict.Conflicts.ID, nil return conflict.Conflicts.ID, nil
} }
return "", errors.Wrap(err, "pre-upload check") return "", fmt.Errorf("pre-upload check: %w", err)
} }
return "", nil return "", nil
} }
@@ -830,7 +854,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "rmdir failed")
+		return fmt.Errorf("rmdir failed: %w", err)
 	}
 	f.dirCache.FlushDir(dir)
 	if err != nil {
@@ -874,7 +898,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	srcPath := srcObj.fs.rootSlash() + srcObj.remote
 	dstPath := f.rootSlash() + remote
 	if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
-		return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
+		return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
 	}

 	// Create temporary object
@@ -958,7 +982,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to read user info")
+		return nil, fmt.Errorf("failed to read user info: %w", err)
 	}
 	// FIXME max upload size would be useful to use in Update
 	usage = &fs.Usage{
@@ -1092,45 +1116,36 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
 // CleanUp empties the trash
 func (f *Fs) CleanUp(ctx context.Context) (err error) {
-	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/folders/trash/items",
-		Parameters: url.Values{
-			"fields": []string{"type", "id"},
-		},
-	}
-	opts.Parameters.Set("limit", strconv.Itoa(listChunks))
-	offset := 0
-	for {
-		opts.Parameters.Set("offset", strconv.Itoa(offset))
-
-		var result api.FolderItems
-		var resp *http.Response
-		err = f.pacer.Call(func() (bool, error) {
-			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-			return shouldRetry(ctx, resp, err)
-		})
-		if err != nil {
-			return errors.Wrap(err, "couldn't list trash")
-		}
-		for i := range result.Entries {
-			item := &result.Entries[i]
-			if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
-				err := f.deletePermanently(ctx, item.Type, item.ID)
-				if err != nil {
-					return errors.Wrap(err, "failed to delete file")
-				}
-			} else {
-				fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
-				continue
-			}
-		}
-		offset += result.Limit
-		if offset >= result.TotalCount {
-			break
-		}
+	var (
+		deleteErrors       = int64(0)
+		concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
+		wg                 sync.WaitGroup
+	)
+	_, err = f.listAll(ctx, "trash", false, false, false, func(item *api.Item) bool {
+		if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
+			wg.Add(1)
+			concurrencyControl <- struct{}{}
+			go func() {
+				defer func() {
+					<-concurrencyControl
+					wg.Done()
+				}()
+				err := f.deletePermanently(ctx, item.Type, item.ID)
+				if err != nil {
+					fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
+					atomic.AddInt64(&deleteErrors, 1)
+				}
+			}()
+		} else {
+			fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
+		}
+		return false
+	})
+	wg.Wait()
+	if deleteErrors != 0 {
+		return fmt.Errorf("failed to delete %d trash items", deleteErrors)
 	}
-	return
+	return err
 }

 // DirCacheFlush resets the directory cache - used in testing as an
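
Note: the rewritten CleanUp fans deletions out across goroutines, using a buffered channel as a counting semaphore sized by the checkers setting, a WaitGroup for completion, and an atomic counter so one failed deletion no longer aborts the whole sweep. A standalone sketch of the same pattern, with an illustrative worker limit and a stubbed-out doDelete:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    // doDelete stands in for the real per-item work.
    func doDelete(i int) error { return nil }

    func main() {
        var (
            failures int64
            sem      = make(chan struct{}, 8) // at most 8 deletions in flight
            wg       sync.WaitGroup
        )
        for i := 0; i < 100; i++ {
            i := i
            wg.Add(1)
            sem <- struct{}{} // acquire a slot
            go func() {
                defer func() {
                    <-sem // release the slot
                    wg.Done()
                }()
                if err := doDelete(i); err != nil {
                    atomic.AddInt64(&failures, 1) // count, don't abort
                }
            }()
        }
        wg.Wait()
        if failures != 0 {
            fmt.Printf("failed to delete %d items\n", failures)
        }
    }
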
@@ -1184,8 +1199,11 @@ func (o *Object) Size() int64 {

 // setMetaData sets the metadata from info
 func (o *Object) setMetaData(info *api.Item) (err error) {
+	if info.Type == api.ItemTypeFolder {
+		return fs.ErrorIsDir
+	}
 	if info.Type != api.ItemTypeFile {
-		return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
+		return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
 	}
 	o.hasMetaData = true
 	o.size = int64(info.Size)
@@ -1321,7 +1339,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
 		return err
 	}
 	if result.TotalCount != 1 || len(result.Entries) != 1 {
-		return errors.Errorf("failed to upload %v - not sure why", o)
+		return fmt.Errorf("failed to upload %v - not sure why", o)
 	}
 	return o.setMetaData(&result.Entries[0])
 }


@@ -8,6 +8,7 @@ import (
 	"crypto/sha1"
 	"encoding/base64"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -15,7 +16,6 @@ import (
 	"sync"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/box/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
@@ -140,7 +140,7 @@ outer:
 			}
 		}
 		default:
-			return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
+			return nil, fmt.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
 		}
 	}
 	fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
@@ -151,7 +151,7 @@ outer:
 	}
 	err = json.Unmarshal(body, &result)
 	if err != nil {
-		return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
+		return nil, fmt.Errorf("couldn't decode commit response: %q: %w", body, err)
 	}
 	return result, nil
 }
@@ -177,7 +177,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 	// Create upload session
 	session, err := o.createUploadSession(ctx, leaf, directoryID, size)
 	if err != nil {
-		return errors.Wrap(err, "multipart upload create session failed")
+		return fmt.Errorf("multipart upload create session failed: %w", err)
 	}
 	chunkSize := session.PartSize
 	fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
@@ -222,7 +222,7 @@ outer:
 		// Read the chunk
 		_, err = io.ReadFull(in, buf)
 		if err != nil {
-			err = errors.Wrap(err, "multipart upload failed to read source")
+			err = fmt.Errorf("multipart upload failed to read source: %w", err)
 			break outer
 		}

@@ -238,7 +238,7 @@ outer:
 			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
 			partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
 			if err != nil {
-				err = errors.Wrap(err, "multipart upload failed to upload part")
+				err = fmt.Errorf("multipart upload failed to upload part: %w", err)
 				select {
 				case errs <- err:
 				default:
@@ -266,11 +266,11 @@ outer:

 	// Finalise the upload session
 	result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
 	if err != nil {
-		return errors.Wrap(err, "multipart upload failed to finalize")
+		return fmt.Errorf("multipart upload failed to finalize: %w", err)
 	}
 	if result.TotalCount != 1 || len(result.Entries) != 1 {
-		return errors.Errorf("multipart upload failed %v - not sure why", o)
+		return fmt.Errorf("multipart upload failed %v - not sure why", o)
 	}
 	return o.setMetaData(&result.Entries[0])
 }
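
Note: the multipart path above fills a fixed-size buffer per part with io.ReadFull before handing it to uploadPart. A standalone sketch of that style of chunked reading (the sizes and in-memory source are illustrative); the final short chunk gets a smaller buffer so io.ReadFull does not report io.ErrUnexpectedEOF:

    package main

    import (
        "bytes"
        "fmt"
        "io"
    )

    func main() {
        src := bytes.NewReader(make([]byte, 10_000))
        size, chunkSize := int64(10_000), int64(4096)
        for position := int64(0); position < size; {
            n := chunkSize
            if remaining := size - position; remaining < n {
                n = remaining // last chunk is short
            }
            buf := make([]byte, n)
            if _, err := io.ReadFull(src, buf); err != nil {
                fmt.Println("failed to read source:", err)
                return
            }
            // hand buf off for upload here
            position += n
        }
        fmt.Println("read all chunks")
    }
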


@@ -1,9 +1,11 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cache

 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -18,7 +20,6 @@ import (
 	"syscall"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/crypt"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/cache"
@@ -68,26 +69,26 @@ func init() {
 		CommandHelp: commandHelp,
 		Options: []fs.Option{{
 			Name:     "remote",
-			Help:     "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+			Help:     "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
 			Required: true,
 		}, {
 			Name: "plex_url",
-			Help: "The URL of the Plex server",
+			Help: "The URL of the Plex server.",
 		}, {
 			Name: "plex_username",
-			Help: "The username of the Plex user",
+			Help: "The username of the Plex user.",
 		}, {
 			Name:       "plex_password",
-			Help:       "The password of the Plex user",
+			Help:       "The password of the Plex user.",
 			IsPassword: true,
 		}, {
 			Name:     "plex_token",
-			Help:     "The plex token for authentication - auto set normally",
+			Help:     "The plex token for authentication - auto set normally.",
 			Hide:     fs.OptionHideBoth,
 			Advanced: true,
 		}, {
 			Name:     "plex_insecure",
-			Help:     "Skip all certificate verification when connecting to the Plex server",
+			Help:     "Skip all certificate verification when connecting to the Plex server.",
 			Advanced: true,
 		}, {
 			Name: "chunk_size",
@@ -142,12 +143,12 @@ oldest chunks until it goes under this value.`,
 			}},
 		}, {
 			Name:     "db_path",
-			Default:  filepath.Join(config.CacheDir, "cache-backend"),
-			Help:     "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
+			Default:  filepath.Join(config.GetCacheDir(), "cache-backend"),
+			Help:     "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
 			Advanced: true,
 		}, {
 			Name:    "chunk_path",
-			Default: filepath.Join(config.CacheDir, "cache-backend"),
+			Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
 			Help: `Directory to cache chunk files.

 Path to where partial file data (chunks) are stored locally. The remote
@@ -167,6 +168,7 @@ then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
 			Name:    "chunk_clean_interval",
 			Default: DefCacheChunkCleanInterval,
 			Help: `How often should the cache perform cleanups of the chunk storage.
+
 The default value should be ok for most people. If you find that the
 cache goes over "cache-chunk-total-size" too often then try to lower
 this value to force it to perform cleanups more often.`,
@@ -220,7 +222,7 @@ available on the local machine.`,
 		}, {
 			Name:    "rps",
 			Default: int(DefCacheRps),
-			Help: `Limits the number of requests per second to the source FS (-1 to disable)
+			Help: `Limits the number of requests per second to the source FS (-1 to disable).

 This setting places a hard limit on the number of requests per second
 that cache will be doing to the cloud provider remote and try to
@@ -241,7 +243,7 @@ still pass.`,
 		}, {
 			Name:    "writes",
 			Default: DefCacheWrites,
-			Help: `Cache file data on writes through the FS
+			Help: `Cache file data on writes through the FS.

 If you need to read files immediately after you upload them through
 cache you can enable this flag to have their data stored in the
@@ -262,7 +264,7 @@ provider`,
 		}, {
 			Name:    "tmp_wait_time",
 			Default: DefCacheTmpWaitTime,
-			Help: `How long should files be stored in local cache before being uploaded
+			Help: `How long should files be stored in local cache before being uploaded.

 This is the duration that a file must wait in the temporary location
 _cache-tmp-upload-path_ before it is selected for upload.
@@ -273,7 +275,7 @@ to start the upload if a queue formed for this purpose.`,
 		}, {
 			Name:    "db_wait_time",
 			Default: DefCacheDbWaitTime,
-			Help: `How long to wait for the DB to be available - 0 is unlimited
+			Help: `How long to wait for the DB to be available - 0 is unlimited.

 Only one process can have the DB open at any one time, so rclone waits
 for this duration for the DB to become available before it gives an
@@ -354,7 +356,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 		return nil, err
 	}
 	if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
-		return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
+		return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
 			opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
 	}
@@ -364,13 +366,13 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 	rpath, err := parseRootPath(rootPath)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
+		return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
 	}

 	remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
 	wrappedFs, wrapErr := cache.Get(ctx, remotePath)
 	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
-		return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
+		return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
 	}
 	var fsErr error
 	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -399,7 +401,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 	if opt.PlexToken != "" {
 		f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
+			return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
 		}
 	} else {
 		if opt.PlexPassword != "" && opt.PlexUsername != "" {
@@ -411,7 +413,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 				m.Set("plex_token", token)
 			})
 			if err != nil {
-				return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
+				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
 			}
 		}
 	}
@@ -420,8 +422,8 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 	dbPath := f.opt.DbPath
 	chunkPath := f.opt.ChunkPath
 	// if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
-	if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
-		chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
+	if dbPath != filepath.Join(config.GetCacheDir(), "cache-backend") &&
+		chunkPath == filepath.Join(config.GetCacheDir(), "cache-backend") {
 		chunkPath = dbPath
 	}
 	if filepath.Ext(dbPath) != "" {
@@ -432,11 +434,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 	}
 	err = os.MkdirAll(dbPath, os.ModePerm)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
+		return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
 	}
 	err = os.MkdirAll(chunkPath, os.ModePerm)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
+		return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
 	}

 	dbPath = filepath.Join(dbPath, name+".db")
@@ -448,7 +450,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 		DbWaitTime: time.Duration(opt.DbWaitTime),
 	})
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to start cache db")
+		return nil, fmt.Errorf("failed to start cache db: %w", err)
 	}
 	// Trap SIGINT and SIGTERM to close the DB handle gracefully
 	c := make(chan os.Signal, 1)
@@ -482,12 +484,12 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 	if f.opt.TempWritePath != "" {
 		err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
+			return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
 		}
 		f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
 		f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
+			return nil, fmt.Errorf("failed to create temp fs: %w", err)
 		}
 		fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
 		fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
@@ -604,7 +606,7 @@ func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err er
 	out = make(rc.Params)
 	m, err := f.Stats()
 	if err != nil {
-		return out, errors.Errorf("error while getting cache stats")
+		return out, fmt.Errorf("error while getting cache stats")
 	}
 	out["status"] = "ok"
 	out["stats"] = m
@@ -631,7 +633,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 	out = make(rc.Params)
 	remoteInt, ok := in["remote"]
 	if !ok {
-		return out, errors.Errorf("remote is needed")
+		return out, fmt.Errorf("remote is needed")
 	}
 	remote := remoteInt.(string)
 	withData := false
@@ -642,7 +644,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 	remote = f.unwrapRemote(remote)
 	if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
-		return out, errors.Errorf("%s doesn't exist in cache", remote)
+		return out, fmt.Errorf("%s doesn't exist in cache", remote)
 	}

 	co := NewObject(f, remote)
@@ -651,7 +653,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 		cd := NewDirectory(f, remote)
 		err := f.cache.ExpireDir(cd)
 		if err != nil {
-			return out, errors.WithMessage(err, "error expiring directory")
+			return out, fmt.Errorf("error expiring directory: %w", err)
 		}
 		// notify vfs too
 		f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
@@ -662,7 +664,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 	// expire the entry
 	err = f.cache.ExpireObject(co, withData)
 	if err != nil {
-		return out, errors.WithMessage(err, "error expiring file")
+		return out, fmt.Errorf("error expiring file: %w", err)
 	}
 	// notify vfs too
 	f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
@@ -683,24 +685,24 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 		case 1:
 			start, err = strconv.ParseInt(ints[0], 10, 64)
 			if err != nil {
-				return nil, errors.Errorf("invalid range: %q", part)
+				return nil, fmt.Errorf("invalid range: %q", part)
 			}
 			end = start + 1
 		case 2:
 			if ints[0] != "" {
 				start, err = strconv.ParseInt(ints[0], 10, 64)
 				if err != nil {
-					return nil, errors.Errorf("invalid range: %q", part)
+					return nil, fmt.Errorf("invalid range: %q", part)
 				}
 			}
 			if ints[1] != "" {
 				end, err = strconv.ParseInt(ints[1], 10, 64)
 				if err != nil {
-					return nil, errors.Errorf("invalid range: %q", part)
+					return nil, fmt.Errorf("invalid range: %q", part)
 				}
 			}
 		default:
-			return nil, errors.Errorf("invalid range: %q", part)
+			return nil, fmt.Errorf("invalid range: %q", part)
 		}
 		crs = append(crs, chunkRange{start: start, end: end})
 	}
@@ -755,18 +757,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 	delete(in, "chunks")
 	crs, err := parseChunks(s)
 	if err != nil {
-		return nil, errors.Wrap(err, "invalid chunks parameter")
+		return nil, fmt.Errorf("invalid chunks parameter: %w", err)
 	}
 	var files [][2]string
 	for k, v := range in {
 		if !strings.HasPrefix(k, "file") {
-			return nil, errors.Errorf("invalid parameter %s=%s", k, v)
+			return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
 		}
 		switch v := v.(type) {
 		case string:
 			files = append(files, [2]string{v, f.unwrapRemote(v)})
 		default:
-			return nil, errors.Errorf("invalid parameter %s=%s", k, v)
+			return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
 		}
 	}
 	type fileStatus struct {
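
Note: the chunk parsing above accepts entries such as "5", "0:10", ":10" or "5:" and converts each into a half-open range. A hedged sketch of that parsing, assuming illustrative defaults for an open-ended range (the real code's defaults and type names may differ):

    package main

    import (
        "fmt"
        "math"
        "strconv"
        "strings"
    )

    type chunkRange struct{ start, end int64 }

    // parseChunk parses "5", "0:10", ":10" or "5:" into a half-open range.
    func parseChunk(part string) (chunkRange, error) {
        start, end := int64(0), int64(math.MaxInt64)
        ints := strings.Split(part, ":")
        switch len(ints) {
        case 1:
            n, err := strconv.ParseInt(ints[0], 10, 64)
            if err != nil {
                return chunkRange{}, fmt.Errorf("invalid range: %q", part)
            }
            start, end = n, n+1
        case 2:
            var err error
            if ints[0] != "" {
                if start, err = strconv.ParseInt(ints[0], 10, 64); err != nil {
                    return chunkRange{}, fmt.Errorf("invalid range: %q", part)
                }
            }
            if ints[1] != "" {
                if end, err = strconv.ParseInt(ints[1], 10, 64); err != nil {
                    return chunkRange{}, fmt.Errorf("invalid range: %q", part)
                }
            }
        default:
            return chunkRange{}, fmt.Errorf("invalid range: %q", part)
        }
        return chunkRange{start, end}, nil
    }

    func main() {
        for _, s := range []string{"5", "0:10", ":10", "5:"} {
            cr, err := parseChunk(s)
            fmt.Println(s, cr, err)
        }
    }
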
@@ -1122,7 +1124,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 		case fs.Directory:
 			_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
 		default:
-			return errors.Errorf("Unknown object type %T", entry)
+			return fmt.Errorf("Unknown object type %T", entry)
 		}
 	}


@@ -1,5 +1,5 @@
-// +build !plan9,!js
-// +build !race
+//go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test
@@ -7,6 +7,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/base64"
+	"errors"
 	goflag "flag"
 	"fmt"
 	"io"
@@ -16,12 +17,12 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"runtime"
 	"runtime/debug"
 	"strings"
 	"testing"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/cache"
 	"github.com/rclone/rclone/backend/crypt"
 	_ "github.com/rclone/rclone/backend/drive"
@@ -293,6 +294,9 @@ func TestInternalCachedWrittenContentMatches(t *testing.T) {
 }

 func TestInternalDoubleWrittenContentMatches(t *testing.T) {
+	if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
+		t.Skip("Skip test on windows/386")
+	}
 	id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -442,7 +446,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 			return err
 		}
 		if coSize != expectedSize {
-			return errors.Errorf("%v <> %v", coSize, expectedSize)
+			return fmt.Errorf("%v <> %v", coSize, expectedSize)
 		}
 		return nil
 	}, 12, time.Second*10)
@@ -498,7 +502,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
 		}
 		if len(li) != 2 {
 			log.Printf("not expected listing /test: %v", li)
-			return errors.Errorf("not expected listing /test: %v", li)
+			return fmt.Errorf("not expected listing /test: %v", li)
 		}

 		li, err = runInstance.list(t, rootFs, "test/one")
@@ -508,7 +512,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
 		}
 		if len(li) != 0 {
 			log.Printf("not expected listing /test/one: %v", li)
-			return errors.Errorf("not expected listing /test/one: %v", li)
+			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}

 		li, err = runInstance.list(t, rootFs, "test/second")
@@ -518,21 +522,21 @@ func TestInternalMoveWithNotify(t *testing.T) {
 		}
 		if len(li) != 1 {
 			log.Printf("not expected listing /test/second: %v", li)
-			return errors.Errorf("not expected listing /test/second: %v", li)
+			return fmt.Errorf("not expected listing /test/second: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "data.bin" {
 				log.Printf("not expected name: %v", fi.Name())
-				return errors.Errorf("not expected name: %v", fi.Name())
+				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/second/data.bin" {
 				log.Printf("not expected remote: %v", di.Remote())
-				return errors.Errorf("not expected remote: %v", di.Remote())
+				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
 			log.Printf("unexpected listing: %v", li)
-			return errors.Errorf("unexpected listing: %v", li)
+			return fmt.Errorf("unexpected listing: %v", li)
 		}

 		log.Printf("complete listing: %v", li)
@@ -587,17 +591,17 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
 		if !found {
 			log.Printf("not found /test")
-			return errors.Errorf("not found /test")
+			return fmt.Errorf("not found /test")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
 		if !found {
 			log.Printf("not found /test/one")
-			return errors.Errorf("not found /test/one")
+			return fmt.Errorf("not found /test/one")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
 		if !found {
 			log.Printf("not found /test/one/test2")
-			return errors.Errorf("not found /test/one/test2")
+			return fmt.Errorf("not found /test/one/test2")
 		}
 		li, err := runInstance.list(t, rootFs, "test/one")
 		if err != nil {
@@ -606,21 +610,21 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 		}
 		if len(li) != 1 {
 			log.Printf("not expected listing /test/one: %v", li)
-			return errors.Errorf("not expected listing /test/one: %v", li)
+			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "test2" {
 				log.Printf("not expected name: %v", fi.Name())
-				return errors.Errorf("not expected name: %v", fi.Name())
+				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/one/test2" {
 				log.Printf("not expected remote: %v", di.Remote())
-				return errors.Errorf("not expected remote: %v", di.Remote())
+				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
 			log.Printf("unexpected listing: %v", li)
-			return errors.Errorf("unexpected listing: %v", li)
+			return fmt.Errorf("unexpected listing: %v", li)
 		}

 		log.Printf("complete listing /test/one/test2")
 		return nil
@@ -681,6 +685,9 @@ func TestInternalCacheWrites(t *testing.T) {
 }

 func TestInternalMaxChunkSizeRespected(t *testing.T) {
+	if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
+		t.Skip("Skip test on windows/386")
+	}
 	id := fmt.Sprintf("timcsr%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -919,9 +926,9 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 		}
 	}
 	runInstance.rootIsCrypt = rootIsCrypt
-	runInstance.dbPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
-	runInstance.chunkPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
-	runInstance.vfsCachePath = filepath.Join(config.CacheDir, "vfs", remote)
+	runInstance.dbPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote+".db")
+	runInstance.chunkPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote)
+	runInstance.vfsCachePath = filepath.Join(config.GetCacheDir(), "vfs", remote)
 	boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)
@@ -1055,7 +1062,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
 	checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
 	if !noLengthCheck && size != int64(len(checkSample)) {
-		return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
+		return checkSample, fmt.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
 	}
 	return checkSample, nil
 }
@@ -1250,7 +1257,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
 		case state = <-buCh:
 			// continue
 		case <-time.After(maxDuration):
-			waitCh <- errors.Errorf("Timed out waiting for background upload: %v", remote)
+			waitCh <- fmt.Errorf("Timed out waiting for background upload: %v", remote)
 			return
 		}
 		checkRemote := state.Remote
@@ -1267,7 +1274,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
 				return
 			}
 		}
-		waitCh <- errors.Errorf("Too many attempts to wait for the background upload: %v", remote)
+		waitCh <- fmt.Errorf("Too many attempts to wait for the background upload: %v", remote)
 	}()
 	return waitCh
 }


@@ -1,7 +1,7 @@
 // Test Cache filesystem interface

-// +build !plan9,!js
-// +build !race
+//go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test


@@ -1,6 +1,7 @@
 // Build for cache for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

+//go:build plan9 || js
 // +build plan9 js

 package cache


@@ -1,5 +1,5 @@
-// +build !plan9,!js
-// +build !race
+//go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test


@@ -1,3 +1,4 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cache


@@ -1,9 +1,11 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cache

 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"path"
@@ -12,7 +14,6 @@ import (
 	"sync"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/operations"
 )
@@ -242,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 			return nil, io.ErrUnexpectedEOF
 		}
-		return nil, errors.Errorf("chunk not found %v", chunkStart)
+		return nil, fmt.Errorf("chunk not found %v", chunkStart)
 	}

 	// first chunk will be aligned with the start
@@ -322,7 +323,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
 		fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
 		r.offset = r.cachedObject.Size() + offset
 	default:
-		err = errors.Errorf("cache: unimplemented seek whence %v", whence)
+		err = fmt.Errorf("cache: unimplemented seek whence %v", whence)
 	}

 	chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
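
Note: Handle.Seek dispatches on the whence argument, where io.SeekStart, io.SeekCurrent and io.SeekEnd are the three defined values and anything else is an error. A minimal sketch of that dispatch with illustrative sizes:

    package main

    import (
        "fmt"
        "io"
    )

    // seek computes a new offset the way a cache handle would.
    func seek(offset, current, size int64, whence int) (int64, error) {
        switch whence {
        case io.SeekStart:
            return offset, nil
        case io.SeekCurrent:
            return current + offset, nil
        case io.SeekEnd:
            return size + offset, nil // offset is usually negative here
        default:
            return 0, fmt.Errorf("cache: unimplemented seek whence %v", whence)
        }
    }

    func main() {
        off, err := seek(-10, 0, 100, io.SeekEnd)
        fmt.Println(off, err) // 90 <nil>
    }
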


@@ -1,15 +1,16 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cache

 import (
 	"context"
+	"fmt"
 	"io"
 	"path"
 	"sync"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/readers"
@@ -177,10 +178,14 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
 	}
 	if o.isTempFile() {
 		liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
-		err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
+		if err != nil {
+			err = fmt.Errorf("in parent fs %v: %w", o.ParentFs, err)
+		}
 	} else {
 		liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
-		err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
+		if err != nil {
+			err = fmt.Errorf("in cache fs %v: %w", o.CacheFs.Fs, err)
+		}
 	}
 	if err != nil {
 		fs.Errorf(o, "error refreshing object in : %v", err)
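
Note: this hunk is more than a mechanical substitution. errors.Wrapf returned nil when given a nil error, whereas fmt.Errorf always produces a non-nil error, so an explicit nil check is needed to avoid turning a successful lookup into a failure. A standalone sketch of the difference, stdlib only:

    package main

    import "fmt"

    func wrap(err error, msg string) error {
        // Unlike the old errors.Wrapf, fmt.Errorf("%w") on a nil error
        // yields a non-nil error, so nil must be handled explicitly.
        if err == nil {
            return nil
        }
        return fmt.Errorf("%s: %w", msg, err)
    }

    func main() {
        fmt.Println(wrap(nil, "in parent fs") == nil)           // true
        fmt.Println(fmt.Errorf("in parent fs: %w", error(nil))) // non-nil: "in parent fs: %!w(<nil>)"
    }
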
@@ -252,7 +257,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		defer o.CacheFs.backgroundRunner.play()
 		// don't allow started uploads
 		if o.isTempFile() && o.tempFileStartedUpload() {
-			return errors.Errorf("%v is currently uploading, can't update", o)
+			return fmt.Errorf("%v is currently uploading, can't update", o)
 		}
 	}
 	fs.Debugf(o, "updating object contents with size %v", src.Size())
@@ -291,7 +296,7 @@ func (o *Object) Remove(ctx context.Context) error {
 		defer o.CacheFs.backgroundRunner.play()
 		// don't allow started uploads
 		if o.isTempFile() && o.tempFileStartedUpload() {
-			return errors.Errorf("%v is currently uploading, can't delete", o)
+			return fmt.Errorf("%v is currently uploading, can't delete", o)
 		}
 	}
 	err := o.Object.Remove(ctx)


@@ -1,3 +1,4 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cache


@@ -1,14 +1,15 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cache

 import (
+	"fmt"
 	"strconv"
 	"strings"
 	"time"

 	cache "github.com/patrickmn/go-cache"
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 )
@@ -52,7 +53,7 @@ func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
 		return data, nil
 	}

-	return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
+	return nil, fmt.Errorf("couldn't get cached object data at offset %v", offset)
 }

 // AddChunk adds a new chunk of a cached object


@@ -1,3 +1,4 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cache
@@ -16,7 +17,6 @@ import (
 	"sync"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/walk"
 	bolt "go.etcd.io/bbolt"
@@ -119,11 +119,11 @@ func (b *Persistent) connect() error {
 	err = os.MkdirAll(b.dataPath, os.ModePerm)
 	if err != nil {
-		return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
+		return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err)
 	}
 	b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
 	if err != nil {
-		return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
+		return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err)
 	}
 	if b.features.PurgeDb {
 		b.Purge()
@@ -175,7 +175,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
 	err := b.db.View(func(tx *bolt.Tx) error {
 		bucket := b.getBucket(remote, false, tx)
 		if bucket == nil {
-			return errors.Errorf("couldn't open bucket (%v)", remote)
+			return fmt.Errorf("couldn't open bucket (%v)", remote)
 		}

 		data := bucket.Get([]byte("."))
@@ -183,7 +183,7 @@
 			return json.Unmarshal(data, cd)
 		}

-		return errors.Errorf("%v not found", remote)
+		return fmt.Errorf("%v not found", remote)
 	})

 	return cd, err
@@ -208,7 +208,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
 			bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
 		}
 		if bucket == nil {
-			return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
+			return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
 		}

 		for _, cachedDir := range cachedDirs {
@@ -225,7 +225,7 @@
 			encoded, err := json.Marshal(cachedDir)
 			if err != nil {
-				return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
+				return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
 			}
 			err = b.Put([]byte("."), encoded)
 			if err != nil {
@@ -243,17 +243,17 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
 	err := b.db.View(func(tx *bolt.Tx) error {
 		bucket := b.getBucket(cachedDir.abs(), false, tx)
 		if bucket == nil {
-			return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
+			return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs())
 		}

 		val := bucket.Get([]byte("."))
 		if val != nil {
 			err := json.Unmarshal(val, cachedDir)
 			if err != nil {
-				return errors.Errorf("error during unmarshalling obj: %v", err)
+				return fmt.Errorf("error during unmarshalling obj: %v", err)
 			}
 		} else {
-			return errors.Errorf("missing cached dir: %v", cachedDir)
+			return fmt.Errorf("missing cached dir: %v", cachedDir)
 		}

 		c := bucket.Cursor()
@@ -268,7 +268,7 @@
 			// we try to find a cached meta for the dir
 			currentBucket := c.Bucket().Bucket(k)
 			if currentBucket == nil {
-				return errors.Errorf("couldn't open bucket (%v)", string(k))
+				return fmt.Errorf("couldn't open bucket (%v)", string(k))
 			}

 			metaKey := currentBucket.Get([]byte("."))
@@ -317,7 +317,7 @@ func (b *Persistent) RemoveDir(fp string) error {
 	err = b.db.Update(func(tx *bolt.Tx) error {
 		bucket := b.getBucket(cleanPath(parentDir), false, tx)
 		if bucket == nil {
-			return errors.Errorf("couldn't open bucket (%v)", fp)
+			return fmt.Errorf("couldn't open bucket (%v)", fp)
 		}
 		// delete the cached dir
 		err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
@@ -377,13 +377,13 @@ func (b *Persistent) GetObject(cachedObject *Object) (err error) {
 	return b.db.View(func(tx *bolt.Tx) error {
 		bucket := b.getBucket(cachedObject.Dir, false, tx)
 		if bucket == nil {
-			return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
+			return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
 		}
 		val := bucket.Get([]byte(cachedObject.Name))
 		if val != nil {
 			return json.Unmarshal(val, cachedObject)
 		}
-		return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
+		return fmt.Errorf("couldn't find object (%v)", cachedObject.Name)
 	})
 }
@@ -392,16 +392,16 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
 		bucket := b.getBucket(cachedObject.Dir, true, tx)
 		if bucket == nil {
-			return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
+			return fmt.Errorf("couldn't open parent bucket for %v", cachedObject)
 		}
 		// cache Object Info
 		encoded, err := json.Marshal(cachedObject)
 		if err != nil {
-			return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
+			return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
 		}
 		err = bucket.Put([]byte(cachedObject.Name), encoded)
 		if err != nil {
-			return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
+			return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
 		}
 		return nil
 	})
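
Note: the persistent store runs every read inside db.View and every write inside db.Update; bbolt executes the closure in a managed transaction and commits or rolls back depending on the returned error. A minimal sketch of that pattern (the bucket and key names are illustrative):

    package main

    import (
        "fmt"
        "log"

        bolt "go.etcd.io/bbolt"
    )

    func main() {
        db, err := bolt.Open("example.db", 0644, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // Update runs the closure in a writable transaction; returning
        // an error rolls the whole transaction back.
        err = db.Update(func(tx *bolt.Tx) error {
            bucket, err := tx.CreateBucketIfNotExists([]byte("dir"))
            if err != nil {
                return fmt.Errorf("couldn't bucket for %v", "dir")
            }
            return bucket.Put([]byte("name"), []byte("value"))
        })
        if err != nil {
            log.Fatal(err)
        }

        // View runs the closure in a read-only transaction.
        err = db.View(func(tx *bolt.Tx) error {
            bucket := tx.Bucket([]byte("dir"))
            if bucket == nil {
                return fmt.Errorf("couldn't open bucket (%v)", "dir")
            }
            fmt.Printf("%s\n", bucket.Get([]byte("name")))
            return nil
        })
        if err != nil {
            log.Fatal(err)
        }
    }
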
@@ -413,7 +413,7 @@ func (b *Persistent) RemoveObject(fp string) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
 		bucket := b.getBucket(cleanPath(parentDir), false, tx)
 		if bucket == nil {
-			return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
+			return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
 		}
 		err := bucket.Delete([]byte(cleanPath(objName)))
 		if err != nil {
@@ -445,7 +445,7 @@ func (b *Persistent) HasEntry(remote string) bool {
 	err := b.db.View(func(tx *bolt.Tx) error {
 		bucket := b.getBucket(dir, false, tx)
 		if bucket == nil {
-			return errors.Errorf("couldn't open parent bucket for %v", remote)
+			return fmt.Errorf("couldn't open parent bucket for %v", remote)
 		}
 		if f := bucket.Bucket([]byte(name)); f != nil {
 			return nil
@@ -454,7 +454,7 @@
 			return nil
 		}
-		return errors.Errorf("couldn't find object (%v)", remote)
+		return fmt.Errorf("couldn't find object (%v)", remote)
 	})
 	if err == nil {
 		return true
@@ -554,7 +554,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 	err := b.db.Update(func(tx *bolt.Tx) error {
 		dataTsBucket := tx.Bucket([]byte(DataTsBucket))
 		if dataTsBucket == nil {
-			return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
+			return fmt.Errorf("Couldn't open (%v) bucket", DataTsBucket)
 		}
 		// iterate through ts
 		c := dataTsBucket.Cursor()
@@ -732,7 +732,7 @@ func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
 				return nil
 			}
 		}
-		return errors.Errorf("not found %v-%v", path, offset)
+		return fmt.Errorf("not found %v-%v", path, offset)
 	})

 	return t, err
@@ -772,7 +772,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 		if err != nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
 		}
 		tempObj := &tempUploadInfo{
 			DestPath: destPath,
@@ -783,11 +783,11 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
 		// cache Object Info
 		encoded, err := json.Marshal(tempObj)
 		if err != nil {
-			return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
+			return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
 		}
 		err = bucket.Put([]byte(destPath), encoded)
 		if err != nil {
-			return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
+			return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 		}

 		return nil
@@ -802,7 +802,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
 	err = b.db.Update(func(tx *bolt.Tx) error {
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 		if err != nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
 		}

 		c := bucket.Cursor()
@@ -835,7 +835,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
 			return nil
 		}

-		return errors.Errorf("no pending upload found")
+		return fmt.Errorf("no pending upload found")
 	})

 	return destPath, err
@@ -846,14 +846,14 @@ func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error
 	err = b.db.View(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket([]byte(tempBucket))
 		if bucket == nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
 		}

 		var tempObj = &tempUploadInfo{}
 		v := bucket.Get([]byte(remote))
 		err = json.Unmarshal(v, tempObj)
 		if err != nil {
-			return errors.Errorf("pending upload (%v) not found %v", remote, err)
+			return fmt.Errorf("pending upload (%v) not found %v", remote, err)
 		}

 		started = tempObj.Started
@@ -868,7 +868,7 @@ func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, e
 	err = b.db.View(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket([]byte(tempBucket))
 		if bucket == nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
 		}

 		c := bucket.Cursor()
@@ -898,22 +898,22 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 		if err != nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
 		}
 		var tempObj = &tempUploadInfo{}
 		v := bucket.Get([]byte(remote))
 		err = json.Unmarshal(v, tempObj)
 		if err != nil {
-			return errors.Errorf("pending upload (%v) not found %v", remote, err)
+			return fmt.Errorf("pending upload (%v) not found %v", remote, err)
 		}
 		tempObj.Started = false
 		v2, err := json.Marshal(tempObj)
 		if err != nil {
-			return errors.Errorf("pending upload not updated %v", err)
+			return fmt.Errorf("pending upload not updated %v", err)
 		}
 		err = bucket.Put([]byte(tempObj.DestPath), v2)
 		if err != nil {
-			return errors.Errorf("pending upload not updated %v", err)
+			return fmt.Errorf("pending upload not updated %v", err)
 		}
 		return nil
 	})
@@ -926,7 +926,7 @@ func (b *Persistent) removePendingUpload(remote string) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 		if err != nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
 		}
 		return bucket.Delete([]byte(remote))
 	})
@@ -941,17 +941,17 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
 	return b.db.Update(func(tx *bolt.Tx) error {
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 		if err != nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
 		}
 		var tempObj = &tempUploadInfo{}
 		v := bucket.Get([]byte(remote))
 		err = json.Unmarshal(v, tempObj)
 		if err != nil {
-			return errors.Errorf("pending upload (%v) not found %v", remote, err)
+			return fmt.Errorf("pending upload (%v) not found %v", remote, err)
 		}
 		if tempObj.Started {
-			return errors.Errorf("pending upload already started %v", remote)
+			return fmt.Errorf("pending upload already started %v", remote)
 		}
 		err = fn(tempObj)
 		if err != nil {
@@ -969,11 +969,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
 		}
 		v2, err := json.Marshal(tempObj)
 		if err != nil {
-			return errors.Errorf("pending upload not updated %v", err)
+			return fmt.Errorf("pending upload not updated %v", err)
 		}
 		err = bucket.Put([]byte(tempObj.DestPath), v2)
 		if err != nil {
-			return errors.Errorf("pending upload not updated %v", err)
+			return fmt.Errorf("pending upload not updated %v", err)
 		}

 		return nil
@@ -1014,11 +1014,11 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
 		// cache Object Info
 		encoded, err := json.Marshal(tempObj)
 		if err != nil {
-			return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
+			return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
 		}
 		err = bucket.Put([]byte(destPath), encoded)
 		if err != nil {
-			return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
+			return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 		}
 		fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
 	}


@@ -8,6 +8,7 @@ import (
 	"crypto/sha1"
 	"encoding/hex"
 	"encoding/json"
+	"errors"
 	"fmt"
 	gohash "hash"
 	"io"
@@ -21,7 +22,6 @@ import (
 	"sync"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/cache"
@@ -150,6 +150,7 @@ func init() {
 		Name:     "remote",
 		Required: true,
 		Help: `Remote to chunk/unchunk.
+
 Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
 "myremote:bucket" or maybe "myremote:" (not recommended).`,
 	}, {
@@ -163,6 +164,7 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
 		Hide:    fs.OptionHideCommandLine,
 		Default: `*.rclone_chunk.###`,
 		Help: `String format of chunk file names.
+
 The two placeholders are: base file name (*) and chunk number (#...).
 There must be one and only one asterisk and one or more consecutive hash characters.
 If chunk number has less digits than the number of hashes, it is left-padded by zeros.
@@ -174,48 +176,57 @@ Possible chunk files are ignored if their name does not match given format.`,
Hide: fs.OptionHideCommandLine, Hide: fs.OptionHideCommandLine,
Default: 1, Default: 1,
Help: `Minimum valid chunk number. Usually 0 or 1. Help: `Minimum valid chunk number. Usually 0 or 1.
By default chunk numbers start from 1.`, By default chunk numbers start from 1.`,
}, { }, {
Name: "meta_format", Name: "meta_format",
Advanced: true, Advanced: true,
Hide: fs.OptionHideCommandLine, Hide: fs.OptionHideCommandLine,
Default: "simplejson", Default: "simplejson",
Help: `Format of the metadata object or "none". By default "simplejson". Help: `Format of the metadata object or "none".
By default "simplejson".
Metadata is a small JSON file named after the composite file.`, Metadata is a small JSON file named after the composite file.`,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "none", Value: "none",
Help: `Do not use metadata files at all. Requires hash type "none".`, Help: `Do not use metadata files at all.
Requires hash type "none".`,
}, { }, {
Value: "simplejson", Value: "simplejson",
Help: `Simple JSON supports hash sums and chunk validation. Help: `Simple JSON supports hash sums and chunk validation.
It has the following fields: ver, size, nchunks, md5, sha1.`, It has the following fields: ver, size, nchunks, md5, sha1.`,
}}, }},
}, { }, {
Name: "hash_type", Name: "hash_type",
Advanced: false, Advanced: false,
Default: "md5", Default: "md5",
Help: `Choose how chunker handles hash sums. All modes but "none" require metadata.`, Help: `Choose how chunker handles hash sums.
All modes but "none" require metadata.`,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "none", Value: "none",
Help: `Pass any hash supported by wrapped remote for non-chunked files, return nothing otherwise`, Help: `Pass any hash supported by wrapped remote for non-chunked files.
Return nothing otherwise.`,
}, { }, {
Value: "md5", Value: "md5",
Help: `MD5 for composite files`, Help: `MD5 for composite files.`,
}, { }, {
Value: "sha1", Value: "sha1",
Help: `SHA1 for composite files`, Help: `SHA1 for composite files.`,
}, { }, {
Value: "md5all", Value: "md5all",
Help: `MD5 for all files`, Help: `MD5 for all files.`,
}, { }, {
Value: "sha1all", Value: "sha1all",
Help: `SHA1 for all files`, Help: `SHA1 for all files.`,
}, { }, {
Value: "md5quick", Value: "md5quick",
Help: `Copying a file to chunker will request MD5 from the source falling back to SHA1 if unsupported`, Help: `Copying a file to chunker will request MD5 from the source.
Falling back to SHA1 if unsupported.`,
}, { }, {
Value: "sha1quick", Value: "sha1quick",
Help: `Similar to "md5quick" but prefers SHA1 over MD5`, Help: `Similar to "md5quick" but prefers SHA1 over MD5.`,
}}, }},
}, { }, {
Name: "fail_hard", Name: "fail_hard",
@@ -279,13 +290,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
baseName, basePath, err := fspath.SplitFs(remote) baseName, basePath, err := fspath.SplitFs(remote)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote) return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
} }
// Look for a file first // Look for a file first
remotePath := fspath.JoinRootPath(basePath, rpath) remotePath := fspath.JoinRootPath(basePath, rpath)
baseFs, err := cache.Get(ctx, baseName+remotePath) baseFs, err := cache.Get(ctx, baseName+remotePath)
if err != fs.ErrorIsFile && err != nil { if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath) return nil, fmt.Errorf("failed to make remote %q to wrap: %w", baseName+remotePath, err)
} }
if !operations.CanServerSideMove(baseFs) { if !operations.CanServerSideMove(baseFs) {
return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy") return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy")
@@ -375,7 +386,7 @@ type Fs struct {
// configure must be called only from NewFs or by unit tests. // configure must be called only from NewFs or by unit tests.
func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error { func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error {
if err := f.setChunkNameFormat(nameFormat); err != nil { if err := f.setChunkNameFormat(nameFormat); err != nil {
return errors.Wrapf(err, "invalid name format '%s'", nameFormat) return fmt.Errorf("invalid name format '%s': %w", nameFormat, err)
} }
if err := f.setMetaFormat(metaFormat); err != nil { if err := f.setMetaFormat(metaFormat); err != nil {
return err return err
@@ -432,10 +443,10 @@ func (f *Fs) setHashType(hashType string) error {
f.hashFallback = true f.hashFallback = true
case "md5all": case "md5all":
f.useMD5 = true f.useMD5 = true
f.hashAll = !f.base.Hashes().Contains(hash.MD5) f.hashAll = !f.base.Hashes().Contains(hash.MD5) || f.base.Features().SlowHash
case "sha1all": case "sha1all":
f.useSHA1 = true f.useSHA1 = true
f.hashAll = !f.base.Hashes().Contains(hash.SHA1) f.hashAll = !f.base.Hashes().Contains(hash.SHA1) || f.base.Features().SlowHash
default: default:
return fmt.Errorf("unsupported hash type '%s'", hashType) return fmt.Errorf("unsupported hash type '%s'", hashType)
} }
@@ -812,7 +823,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
tempEntries = append(tempEntries, wrapDir) tempEntries = append(tempEntries, wrapDir)
default: default:
if f.opt.FailHard { if f.opt.FailHard {
return nil, fmt.Errorf("Unknown object type %T", entry) return nil, fmt.Errorf("unknown object type %T", entry)
} }
fs.Debugf(f, "unknown object type %T", entry) fs.Debugf(f, "unknown object type %T", entry)
} }
@@ -867,7 +878,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// ignores non-chunked objects and skips chunk size checks. // ignores non-chunked objects and skips chunk size checks.
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) { func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
if err := f.forbidChunk(false, remote); err != nil { if err := f.forbidChunk(false, remote); err != nil {
return nil, errors.Wrap(err, "can't access") return nil, fmt.Errorf("can't access: %w", err)
} }
var ( var (
@@ -916,7 +927,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
case fs.ErrorDirNotFound: case fs.ErrorDirNotFound:
entries = nil entries = nil
default: default:
return nil, errors.Wrap(err, "can't detect composite file") return nil, fmt.Errorf("can't detect composite file: %w", err)
} }
if f.useNoRename { if f.useNoRename {
@@ -1056,7 +1067,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
case ErrMetaTooBig, ErrMetaUnknown: case ErrMetaTooBig, ErrMetaUnknown:
return err // return these errors unwrapped for unit tests return err // return these errors unwrapped for unit tests
default: default:
return errors.Wrap(err, "invalid metadata") return fmt.Errorf("invalid metadata: %w", err)
} }
if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks { if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
return errors.New("metadata doesn't match file size") return errors.New("metadata doesn't match file size")
@@ -1099,7 +1110,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
switch o.f.opt.MetaFormat { switch o.f.opt.MetaFormat {
case "simplejson": case "simplejson":
if data != nil && len(data) > maxMetadataSizeWritten { if len(data) > maxMetadataSizeWritten {
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
} }
var metadata metaSimpleJSON var metadata metaSimpleJSON
@@ -1121,7 +1132,7 @@ func (f *Fs) put(
// Perform consistency checks // Perform consistency checks
if err := f.forbidChunk(src, remote); err != nil { if err := f.forbidChunk(src, remote); err != nil {
return nil, errors.Wrap(err, action+" refused") return nil, fmt.Errorf("%s refused: %w", action, err)
} }
if target == nil { if target == nil {
// Get target object with a quick directory scan // Get target object with a quick directory scan
@@ -1135,7 +1146,7 @@ func (f *Fs) put(
obj := target.(*Object) obj := target.(*Object)
if err := obj.readMetadata(ctx); err == ErrMetaUnknown { if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
// refuse to update a file of unsupported format // refuse to update a file of unsupported format
return nil, errors.Wrap(err, "refusing to "+action) return nil, fmt.Errorf("refusing to %s: %w", action, err)
} }
} }
@@ -1214,7 +1225,7 @@ func (f *Fs) put(
// and skips the "EOF" read. Hence, switch to next limit here. // and skips the "EOF" read. Hence, switch to next limit here.
if !(c.chunkLimit == 0 || c.chunkLimit == c.chunkSize || c.sizeTotal == -1 || c.done) { if !(c.chunkLimit == 0 || c.chunkLimit == c.chunkSize || c.sizeTotal == -1 || c.done) {
silentlyRemove(ctx, chunk) silentlyRemove(ctx, chunk)
return nil, fmt.Errorf("Destination ignored %d data bytes", c.chunkLimit) return nil, fmt.Errorf("destination ignored %d data bytes", c.chunkLimit)
} }
c.chunkLimit = c.chunkSize c.chunkLimit = c.chunkSize
@@ -1223,7 +1234,7 @@ func (f *Fs) put(
// Validate uploaded size // Validate uploaded size
if c.sizeTotal != -1 && c.readCount != c.sizeTotal { if c.sizeTotal != -1 && c.readCount != c.sizeTotal {
return nil, fmt.Errorf("Incorrect upload size %d != %d", c.readCount, c.sizeTotal) return nil, fmt.Errorf("incorrect upload size %d != %d", c.readCount, c.sizeTotal)
} }
// Check for input that looks like valid metadata // Check for input that looks like valid metadata
@@ -1260,7 +1271,7 @@ func (f *Fs) put(
sizeTotal += chunk.Size() sizeTotal += chunk.Size()
} }
if sizeTotal != c.readCount { if sizeTotal != c.readCount {
return nil, fmt.Errorf("Incorrect chunks size %d != %d", sizeTotal, c.readCount) return nil, fmt.Errorf("incorrect chunks size %d != %d", sizeTotal, c.readCount)
} }
// If previous object was chunked, remove its chunks // If previous object was chunked, remove its chunks
@@ -1553,7 +1564,7 @@ func (f *Fs) Hashes() hash.Set {
// Shouldn't return an error if it already exists // Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(ctx context.Context, dir string) error {
if err := f.forbidChunk(dir, dir); err != nil { if err := f.forbidChunk(dir, dir); err != nil {
return errors.Wrap(err, "can't mkdir") return fmt.Errorf("can't mkdir: %w", err)
} }
return f.base.Mkdir(ctx, dir) return f.base.Mkdir(ctx, dir)
} }
@@ -1622,7 +1633,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
if err := o.f.forbidChunk(o, o.Remote()); err != nil { if err := o.f.forbidChunk(o, o.Remote()); err != nil {
// operations.Move can still call Remove if chunker's Move refuses // operations.Move can still call Remove if chunker's Move refuses
// to corrupt file in hard mode. Hence, refuse to Remove, too. // to corrupt file in hard mode. Hence, refuse to Remove, too.
return errors.Wrap(err, "refuse to corrupt") return fmt.Errorf("refuse to corrupt: %w", err)
} }
if err := o.readMetadata(ctx); err == ErrMetaUnknown { if err := o.readMetadata(ctx); err == ErrMetaUnknown {
// Proceed but warn user that unexpected things can happen. // Proceed but warn user that unexpected things can happen.
@@ -1650,12 +1661,12 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// copyOrMove implements copy or move // copyOrMove implements copy or move
func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMoveFn, md5, sha1, opName string) (fs.Object, error) { func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMoveFn, md5, sha1, opName string) (fs.Object, error) {
if err := f.forbidChunk(o, remote); err != nil { if err := f.forbidChunk(o, remote); err != nil {
return nil, errors.Wrapf(err, "can't %s", opName) return nil, fmt.Errorf("can't %s: %w", opName, err)
} }
if err := o.readMetadata(ctx); err != nil { if err := o.readMetadata(ctx); err != nil {
// Refuse to copy/move composite files with invalid or future // Refuse to copy/move composite files with invalid or future
// metadata format which might involve unsupported chunk types. // metadata format which might involve unsupported chunk types.
return nil, errors.Wrapf(err, "can't %s this file", opName) return nil, fmt.Errorf("can't %s this file: %w", opName, err)
} }
if !o.isComposite() { if !o.isComposite() {
fs.Debugf(o, "%s non-chunked object...", opName) fs.Debugf(o, "%s non-chunked object...", opName)
@@ -2152,7 +2163,7 @@ func (o *Object) UnWrap() fs.Object {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) { func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if err := o.readMetadata(ctx); err != nil { if err := o.readMetadata(ctx); err != nil {
// refuse to open unsupported format // refuse to open unsupported format
return nil, errors.Wrap(err, "can't open") return nil, fmt.Errorf("can't open: %w", err)
} }
if !o.isComposite() { if !o.isComposite() {
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
@@ -2440,7 +2451,7 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) { func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
// Be strict about JSON format // Be strict about JSON format
// to reduce possibility that a random small file resembles metadata. // to reduce possibility that a random small file resembles metadata.
if data != nil && len(data) > maxMetadataSizeWritten { if len(data) > maxMetadataSizeWritten {
return nil, false, ErrMetaTooBig return nil, false, ErrMetaTooBig
} }
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' { if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
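The name_format help above pins down how chunk names are derived from the two placeholders. A standalone sketch of that substitution, using a hypothetical chunkName helper (chunker's real parser additionally validates the format and handles control chunks):

package main

import (
	"fmt"
	"strings"
)

// chunkName is illustrative only: '*' is replaced by the base file name,
// and the run of '#' by the chunk number, left-padded with zeros when the
// number has fewer digits than there are hashes.
func chunkName(format, base string, number int) string {
	hashes := strings.Count(format, "#")
	digits := fmt.Sprintf("%0*d", hashes, number)
	name := strings.Replace(format, strings.Repeat("#", hashes), digits, 1)
	return strings.Replace(name, "*", base, 1)
}

func main() {
	fmt.Println(chunkName("*.rclone_chunk.###", "video.avi", 1))  // video.avi.rclone_chunk.001
	fmt.Println(chunkName("*.rclone_chunk.###", "video.avi", 42)) // video.avi.rclone_chunk.042
}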


@@ -12,6 +12,8 @@ import (
 	"testing"
 
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/fspath"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fs/operations"
@@ -38,6 +40,30 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
 	})
 }
 
+type settings map[string]interface{}
+
+func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
+	fsName := strings.Split(f.Name(), "{")[0] // strip off hash
+	configMap := configmap.Simple{}
+	for key, val := range opts {
+		configMap[key] = fmt.Sprintf("%v", val)
+	}
+	rpath := fspath.JoinRootPath(f.Root(), path)
+	remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), rpath)
+	fixFs, err := fs.NewFs(ctx, remote)
+	require.NoError(t, err)
+	return fixFs
+}
+
+var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
+
+func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
+	item := fstest.Item{Path: name, ModTime: mtime1}
+	_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
+	assert.NotNil(t, obj, message)
+	return obj
+}
+
 // test chunk name parser
 func testChunkNameFormat(t *testing.T, f *Fs) {
 	saveOpt := f.opt
@@ -617,22 +643,13 @@ func testMetadataInput(t *testing.T, f *Fs) {
 	}()
 	f.opt.FailHard = false
 
-	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
-
-	putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
-		item := fstest.Item{Path: name, ModTime: modTime}
-		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
-		assert.NotNil(t, obj, message)
-		return obj
-	}
-
 	runSubtest := func(contents, name string) {
 		description := fmt.Sprintf("file with %s metadata", name)
 		filename := path.Join(dir, name)
 		require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
 
-		part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
-		_ = putFile(f, filename, contents, "upload "+description, false)
+		part := testPutFile(ctx, t, f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
+		_ = testPutFile(ctx, t, f, filename, contents, "upload "+description, false)
 		obj, err := f.NewObject(ctx, filename)
 		assert.NoError(t, err, "access "+description)
@@ -678,7 +695,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
 // Test that chunker refuses to change on objects with future/unknown metadata
 func testFutureProof(t *testing.T, f *Fs) {
-	if f.opt.MetaFormat == "none" {
+	if !f.useMeta {
 		t.Skip("this test requires metadata support")
 	}
@@ -844,6 +861,44 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
 	_ = operations.Purge(ctx, f.base, dir)
 }
 
+// Test that md5all creates metadata even for small files
+func testMD5AllSlow(t *testing.T, f *Fs) {
+	ctx := context.Background()
+	fsResult := deriveFs(ctx, t, f, "md5all", settings{
+		"chunk_size":   "1P",
+		"name_format":  "*.#",
+		"hash_type":    "md5all",
+		"transactions": "rename",
+		"meta_format":  "simplejson",
+	})
+	chunkFs, ok := fsResult.(*Fs)
+	require.True(t, ok, "fs must be a chunker remote")
+	baseFs := chunkFs.base
+	if !baseFs.Features().SlowHash {
+		t.Skipf("this test needs a base fs with slow hash, e.g. local")
+	}
+
+	assert.True(t, chunkFs.useMD5, "must use md5")
+	assert.True(t, chunkFs.hashAll, "must hash all files")
+
+	_ = testPutFile(ctx, t, chunkFs, "file", "-", "error", true)
+	obj, err := chunkFs.NewObject(ctx, "file")
+	require.NoError(t, err)
+	sum, err := obj.Hash(ctx, hash.MD5)
+	assert.NoError(t, err)
+	assert.Equal(t, "336d5ebc5436534e61d16e63ddfca327", sum)
+
+	list, err := baseFs.List(ctx, "")
+	require.NoError(t, err)
+	assert.Equal(t, 2, len(list))
+	_, err = baseFs.NewObject(ctx, "file")
+	assert.NoError(t, err, "metadata must be created")
+	_, err = baseFs.NewObject(ctx, "file.1")
+	assert.NoError(t, err, "first chunk must be created")
+
+	require.NoError(t, operations.Purge(ctx, baseFs, ""))
+}
+
 // InternalTest dispatches all internal tests
 func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("PutLarge", func(t *testing.T) {
@@ -876,6 +931,9 @@ func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("ChunkerServerSideMove", func(t *testing.T) {
 		testChunkerServerSideMove(t, f)
 	})
+	t.Run("MD5AllSlow", func(t *testing.T) {
+		testMD5AllSlow(t, f)
+	})
 }
 
 var _ fstests.InternalTester = (*Fs)(nil)
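deriveFs above builds an on-the-fly remote using rclone's connection string syntax, where backend options ride along with the remote name before the colon. A hedged sketch of opening such a remote directly; the remote name "mychunker" is made up and would have to exist in rclone.conf:

package main

import (
	"context"
	"fmt"

	_ "github.com/rclone/rclone/backend/all" // register backends
	"github.com/rclone/rclone/fs"
)

func main() {
	// Options in the connection string override the configured ones,
	// which is how the test derives an md5all variant of an existing remote.
	f, err := fs.NewFs(context.Background(), "mychunker,hash_type=md5all,chunk_size=1P:subdir")
	if err != nil {
		fmt.Println("NewFs failed:", err)
		return
	}
	fmt.Println("opened:", f.Name(), f.Root())
}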


@@ -44,6 +44,7 @@ func TestIntegration(t *testing.T) {
 			"UserInfo",
 			"Disconnect",
 		},
+		QuickTestOK: true,
 	}
 	if *fstest.RemoteName == "" {
 		name := "TestChunker"


@@ -10,6 +10,7 @@ import (
 	"encoding/binary"
 	"encoding/hex"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -21,7 +22,6 @@ import (
 	"github.com/buengese/sgzip"
 	"github.com/gabriel-vasile/mimetype"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/chunkedreader"
@@ -83,23 +83,23 @@ func init() {
 			Name: "level",
 			Help: `GZIP compression level (-2 to 9).
 
 Generally -1 (default, equivalent to 5) is recommended.
-Levels 1 to 9 increase compressiong at the cost of speed.. Going past 6
+Levels 1 to 9 increase compression at the cost of speed. Going past 6
 generally offers very little return.
 
-Level -2 uses Huffmann encoding only. Only use if you now what you
-are doing
+Level -2 uses Huffmann encoding only. Only use if you know what you
+are doing.
 Level 0 turns off compression.`,
 			Default:  sgzip.DefaultCompression,
 			Advanced: true,
 		}, {
 			Name: "ram_cache_limit",
 			Help: `Some remotes don't allow the upload of files with unknown size.
 In this case the compressed file will need to be cached to determine
 it's size.
 
-Files smaller than this limit will be cached in RAM, file larger than
-this limit will be cached on disk`,
+Files smaller than this limit will be cached in RAM, files larger than
+this limit will be cached on disk.`,
 			Default:  fs.SizeSuffix(20 * 1024 * 1024),
 			Advanced: true,
 		}},
@@ -143,7 +143,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 	wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
+		return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
 	}
 
 	// Strip trailing slashes if they exist in rpath
@@ -158,7 +158,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		wrappedFs, err = wInfo.NewFs(ctx, wName, remotePath, wConfig)
 	}
 	if err != nil && err != fs.ErrorIsFile {
-		return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
+		return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
 	}
 
 	// Create the wrapping fs
@@ -304,7 +304,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
 		case fs.Directory:
 			f.addDir(&newEntries, x)
 		default:
-			return nil, errors.Errorf("Unknown object type %T", entry)
+			return nil, fmt.Errorf("Unknown object type %T", entry)
 		}
 	}
 	return newEntries, nil
@@ -410,7 +410,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
 	srcHash := hasher.Sums()[ht]
 	dstHash, err := o.Hash(ctx, ht)
 	if err != nil {
-		return errors.Wrap(err, "failed to read destination hash")
+		return fmt.Errorf("failed to read destination hash: %w", err)
 	}
 	if srcHash != "" && dstHash != "" && srcHash != dstHash {
 		// remove object
@@ -418,7 +418,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
 		if err != nil {
 			fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 		}
-		return errors.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
+		return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
 	}
 	return nil
 }
@@ -462,10 +462,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 		_ = os.Remove(tempFile.Name())
 	}()
 	if err != nil {
-		return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
+		return nil, fmt.Errorf("Failed to create temporary local FS to spool file: %w", err)
 	}
 	if _, err = io.Copy(tempFile, in); err != nil {
-		return nil, errors.Wrap(err, "Failed to write temporary local file")
+		return nil, fmt.Errorf("Failed to write temporary local file: %w", err)
 	}
 	if _, err = tempFile.Seek(0, 0); err != nil {
 		return nil, err
@@ -714,7 +714,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
 		err = oldObj.(*Object).Object.Remove(ctx)
 		if err != nil {
-			return nil, errors.Wrap(err, "Could remove original object")
+			return nil, fmt.Errorf("Could remove original object: %w", err)
 		}
 	}
@@ -723,7 +723,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	if compressible {
 		wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
 		if err != nil {
-			return nil, errors.Wrap(err, "Couldn't rename streamed Object.")
+			return nil, fmt.Errorf("Couldn't rename streamed Object.: %w", err)
 		}
 		newObj.Object = wrapObj
 	}
@@ -1260,7 +1260,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 		return o.Object.Open(ctx, options...)
 	}
 	// Get offset and limit from OpenOptions, pass the rest to the underlying remote
-	var openOptions []fs.OpenOption = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
+	var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
 		switch x := option.(type) {
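The level option help maps onto the classic flate scale. A small stdlib sketch of what the endpoints mean, on the assumption that sgzip mirrors compress/gzip's level constants (the sgzip.DefaultCompression default above suggests it does):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"log"
)

func main() {
	// -2 = HuffmanOnly, -1 = DefaultCompression, 0 = no compression,
	// 1..9 trade speed for ratio (diminishing returns past 6).
	var buf bytes.Buffer
	w, err := gzip.NewWriterLevel(&buf, gzip.HuffmanOnly) // level -2
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write(bytes.Repeat([]byte("abc"), 1000)); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("3000 bytes in -> %d bytes out\n", buf.Len())
}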


@@ -16,6 +16,9 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
+	if *fstest.RemoteName == "" {
+		t.Skip("Skipping as -remote not set")
+	}
 	opt := fstests.Opt{
 		RemoteName: *fstest.RemoteName,
 		NilObject:  (*Object)(nil),
@@ -61,5 +64,6 @@ func TestRemoteGzip(t *testing.T) {
 			{Name: name, Key: "remote", Value: tempdir},
 			{Name: name, Key: "compression_mode", Value: "gzip"},
 		},
+		QuickTestOK: true,
 	})
 }


@@ -7,6 +7,8 @@ import (
 	gocipher "crypto/cipher"
 	"crypto/rand"
 	"encoding/base32"
+	"encoding/base64"
+	"errors"
 	"fmt"
 	"io"
 	"strconv"
@@ -15,7 +17,7 @@ import (
 	"time"
 	"unicode/utf8"
 
-	"github.com/pkg/errors"
+	"github.com/Max-Sum/base32768"
 	"github.com/rclone/rclone/backend/crypt/pkcs7"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
@@ -94,12 +96,12 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
 	case "obfuscate":
 		mode = NameEncryptionObfuscated
 	default:
-		err = errors.Errorf("Unknown file name encryption mode %q", s)
+		err = fmt.Errorf("Unknown file name encryption mode %q", s)
 	}
 	return mode, err
 }
 
-// String turns mode into a human readable string
+// String turns mode into a human-readable string
 func (mode NameEncryptionMode) String() (out string) {
 	switch mode {
 	case NameEncryptionOff:
@@ -114,6 +116,57 @@ func (mode NameEncryptionMode) String() (out string) {
 	return out
 }
 
+// fileNameEncoding are the encoding methods dealing with encrypted file names
+type fileNameEncoding interface {
+	EncodeToString(src []byte) string
+	DecodeString(s string) ([]byte, error)
+}
+
+// caseInsensitiveBase32Encoding defines a file name encoding
+// using a modified version of standard base32 as described in
+// RFC4648
+//
+// The standard encoding is modified in two ways
+//  * it becomes lower case (no-one likes upper case filenames!)
+//  * we strip the padding character `=`
+type caseInsensitiveBase32Encoding struct{}
+
+// EncodeToString encodes a strign using the modified version of
+// base32 encoding.
+func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
+	encoded := base32.HexEncoding.EncodeToString(src)
+	encoded = strings.TrimRight(encoded, "=")
+	return strings.ToLower(encoded)
+}
+
+// DecodeString decodes a string as encoded by EncodeToString
+func (caseInsensitiveBase32Encoding) DecodeString(s string) ([]byte, error) {
+	if strings.HasSuffix(s, "=") {
+		return nil, ErrorBadBase32Encoding
+	}
+	// First figure out how many padding characters to add
+	roundUpToMultipleOf8 := (len(s) + 7) &^ 7
+	equals := roundUpToMultipleOf8 - len(s)
+	s = strings.ToUpper(s) + "========"[:equals]
+	return base32.HexEncoding.DecodeString(s)
+}
+
+// NewNameEncoding creates a NameEncoding from a string
+func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
+	s = strings.ToLower(s)
+	switch s {
+	case "base32":
+		enc = caseInsensitiveBase32Encoding{}
+	case "base64":
+		enc = base64.RawURLEncoding
+	case "base32768":
+		enc = base32768.SafeEncoding
+	default:
+		err = fmt.Errorf("Unknown file name encoding mode %q", s)
+	}
+	return enc, err
+}
+
 // Cipher defines an encoding and decoding cipher for the crypt backend
 type Cipher struct {
 	dataKey        [32]byte // Key for secretbox
@@ -121,15 +174,17 @@ type Cipher struct {
 	nameTweak      [nameCipherBlockSize]byte // used to tweak the name crypto
 	block          gocipher.Block
 	mode           NameEncryptionMode
+	fileNameEnc    fileNameEncoding
 	buffers        sync.Pool // encrypt/decrypt buffers
 	cryptoRand     io.Reader // read crypto random numbers from here
 	dirNameEncrypt bool
 }
 
 // newCipher initialises the cipher. If salt is "" then it uses a built in salt val
-func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
+func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
 	c := &Cipher{
 		mode:           mode,
+		fileNameEnc:    enc,
 		cryptoRand:     rand.Reader,
 		dirNameEncrypt: dirNameEncrypt,
 	}
@@ -187,30 +242,6 @@ func (c *Cipher) putBlock(buf []byte) {
 	c.buffers.Put(buf)
 }
 
-// encodeFileName encodes a filename using a modified version of
-// standard base32 as described in RFC4648
-//
-// The standard encoding is modified in two ways
-//  * it becomes lower case (no-one likes upper case filenames!)
-//  * we strip the padding character `=`
-func encodeFileName(in []byte) string {
-	encoded := base32.HexEncoding.EncodeToString(in)
-	encoded = strings.TrimRight(encoded, "=")
-	return strings.ToLower(encoded)
-}
-
-// decodeFileName decodes a filename as encoded by encodeFileName
-func decodeFileName(in string) ([]byte, error) {
-	if strings.HasSuffix(in, "=") {
-		return nil, ErrorBadBase32Encoding
-	}
-	// First figure out how many padding characters to add
-	roundUpToMultipleOf8 := (len(in) + 7) &^ 7
-	equals := roundUpToMultipleOf8 - len(in)
-	in = strings.ToUpper(in) + "========"[:equals]
-	return base32.HexEncoding.DecodeString(in)
-}
-
 // encryptSegment encrypts a path segment
 //
 // This uses EME with AES
@@ -231,7 +262,7 @@ func (c *Cipher) encryptSegment(plaintext string) string {
 	}
 	paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext))
 	ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt)
-	return encodeFileName(ciphertext)
+	return c.fileNameEnc.EncodeToString(ciphertext)
}
 
 // decryptSegment decrypts a path segment
@@ -239,7 +270,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
 	if ciphertext == "" {
 		return "", nil
 	}
-	rawCiphertext, err := decodeFileName(ciphertext)
+	rawCiphertext, err := c.fileNameEnc.DecodeString(ciphertext)
 	if err != nil {
 		return "", err
 	}
@@ -580,7 +611,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
 func (n *nonce) fromReader(in io.Reader) error {
 	read, err := io.ReadFull(in, (*n)[:])
 	if read != fileNonceSize {
-		return errors.Wrap(err, "short read of nonce")
+		return fmt.Errorf("short read of nonce: %w", err)
 	}
 	return nil
 }
@@ -956,7 +987,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
 	// Re-open the underlying object with the offset given
 	rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
 	if err != nil {
-		return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
+		return 0, fh.finish(fmt.Errorf("couldn't reopen file with offset and limit: %w", err))
 	}
 
 	// Set the file handle
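The new fileNameEncoding interface is deliberately the same shape as encoding/base64's Encoding, which is why base64.RawURLEncoding plugs in unchanged, while base32768.SafeEncoding targets remotes that measure name length in UTF-16 code units. A stdlib-only sketch comparing encoded name lengths for one 16-byte name cipher block:

package main

import (
	"encoding/base32"
	"encoding/base64"
	"fmt"
	"strings"
)

// Same shape as the fileNameEncoding interface in the hunk above;
// base64.RawURLEncoding satisfies it as-is.
type fileNameEncoding interface {
	EncodeToString(src []byte) string
	DecodeString(s string) ([]byte, error)
}

func main() {
	ciphertext := []byte("0123456789abcdef") // one 16-byte block, for scale
	// crypt's base32 variant: lower case, padding stripped
	b32 := strings.ToLower(strings.TrimRight(base32.HexEncoding.EncodeToString(ciphertext), "="))
	var b64 fileNameEncoding = base64.RawURLEncoding
	fmt.Println(len(b32), len(b64.EncodeToString(ciphertext))) // 26 22
}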


@@ -4,13 +4,15 @@ import (
"bytes" "bytes"
"context" "context"
"encoding/base32" "encoding/base32"
"encoding/base64"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"strings" "strings"
"testing" "testing"
"github.com/pkg/errors" "github.com/Max-Sum/base32768"
"github.com/rclone/rclone/backend/crypt/pkcs7" "github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@@ -45,11 +47,31 @@ func TestNewNameEncryptionModeString(t *testing.T) {
assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3") assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
} }
func TestEncodeFileName(t *testing.T) { type EncodingTestCase struct {
for _, test := range []struct { in string
in string expected string
expected string }
}{
func testEncodeFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
for _, test := range testCases {
enc, err := NewNameEncoding(encoding)
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
actual := enc.EncodeToString([]byte(test.in))
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
recovered, err := enc.DecodeString(test.expected)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
if caseInsensitive {
in := strings.ToUpper(test.expected)
recovered, err = enc.DecodeString(in)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
}
}
}
func TestEncodeFileNameBase32(t *testing.T) {
testEncodeFileName(t, "base32", []EncodingTestCase{
{"", ""}, {"", ""},
{"1", "64"}, {"1", "64"},
{"12", "64p0"}, {"12", "64p0"},
@@ -67,20 +89,56 @@ func TestEncodeFileName(t *testing.T) {
{"12345678901234", "64p36d1l6orjge9g64p36d0"}, {"12345678901234", "64p36d1l6orjge9g64p36d0"},
{"123456789012345", "64p36d1l6orjge9g64p36d1l"}, {"123456789012345", "64p36d1l6orjge9g64p36d1l"},
{"1234567890123456", "64p36d1l6orjge9g64p36d1l6o"}, {"1234567890123456", "64p36d1l6orjge9g64p36d1l6o"},
} { }, true)
actual := encodeFileName([]byte(test.in))
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
recovered, err := decodeFileName(test.expected)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
in := strings.ToUpper(test.expected)
recovered, err = decodeFileName(in)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
}
} }
func TestDecodeFileName(t *testing.T) { func TestEncodeFileNameBase64(t *testing.T) {
testEncodeFileName(t, "base64", []EncodingTestCase{
{"", ""},
{"1", "MQ"},
{"12", "MTI"},
{"123", "MTIz"},
{"1234", "MTIzNA"},
{"12345", "MTIzNDU"},
{"123456", "MTIzNDU2"},
{"1234567", "MTIzNDU2Nw"},
{"12345678", "MTIzNDU2Nzg"},
{"123456789", "MTIzNDU2Nzg5"},
{"1234567890", "MTIzNDU2Nzg5MA"},
{"12345678901", "MTIzNDU2Nzg5MDE"},
{"123456789012", "MTIzNDU2Nzg5MDEy"},
{"1234567890123", "MTIzNDU2Nzg5MDEyMw"},
{"12345678901234", "MTIzNDU2Nzg5MDEyMzQ"},
{"123456789012345", "MTIzNDU2Nzg5MDEyMzQ1"},
{"1234567890123456", "MTIzNDU2Nzg5MDEyMzQ1Ng"},
}, false)
}
func TestEncodeFileNameBase32768(t *testing.T) {
testEncodeFileName(t, "base32768", []EncodingTestCase{
{"", ""},
{"1", "㼿"},
{"12", "㻙ɟ"},
{"123", "㻙ⲿ"},
{"1234", "㻙ⲍƟ"},
{"12345", "㻙ⲍ⍟"},
{"123456", "㻙ⲍ⍆ʏ"},
{"1234567", "㻙ⲍ⍆觟"},
{"12345678", "㻙ⲍ⍆觓ɧ"},
{"123456789", "㻙ⲍ⍆觓栯"},
{"1234567890", "㻙ⲍ⍆觓栩ɣ"},
{"12345678901", "㻙ⲍ⍆觓栩朧"},
{"123456789012", "㻙ⲍ⍆觓栩朤ʅ"},
{"1234567890123", "㻙ⲍ⍆觓栩朤談"},
{"12345678901234", "㻙ⲍ⍆觓栩朤諆ɔ"},
{"123456789012345", "㻙ⲍ⍆觓栩朤諆媕"},
{"1234567890123456", "㻙ⲍ⍆觓栩朤諆媕䆿"},
}, false)
}
func TestDecodeFileNameBase32(t *testing.T) {
enc, err := NewNameEncoding("base32")
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
// We've tested decoding the valid ones above, now concentrate on the invalid ones // We've tested decoding the valid ones above, now concentrate on the invalid ones
for _, test := range []struct { for _, test := range []struct {
in string in string
@@ -90,17 +148,65 @@ func TestDecodeFileName(t *testing.T) {
{"!", base32.CorruptInputError(0)}, {"!", base32.CorruptInputError(0)},
{"hello=hello", base32.CorruptInputError(5)}, {"hello=hello", base32.CorruptInputError(5)},
} { } {
actual, actualErr := decodeFileName(test.in) actual, actualErr := enc.DecodeString(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr)) assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
} }
} }
func TestEncryptSegment(t *testing.T) { func TestDecodeFileNameBase64(t *testing.T) {
c, _ := newCipher(NameEncryptionStandard, "", "", true) enc, err := NewNameEncoding("base64")
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
// We've tested decoding the valid ones above, now concentrate on the invalid ones
for _, test := range []struct { for _, test := range []struct {
in string in string
expected string expectedErr error
}{ }{
{"64=", base64.CorruptInputError(2)},
{"!", base64.CorruptInputError(0)},
{"Hello=Hello", base64.CorruptInputError(5)},
} {
actual, actualErr := enc.DecodeString(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func TestDecodeFileNameBase32768(t *testing.T) {
enc, err := NewNameEncoding("base32768")
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
// We've tested decoding the valid ones above, now concentrate on the invalid ones
for _, test := range []struct {
in string
expectedErr error
}{
{"㼿c", base32768.CorruptInputError(1)},
{"!", base32768.CorruptInputError(0)},
{"㻙ⲿ=㻙ⲿ", base32768.CorruptInputError(2)},
} {
actual, actualErr := enc.DecodeString(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func testEncryptSegment(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range testCases {
actual := c.encryptSegment(test.in)
assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
recovered, err := c.decryptSegment(test.expected)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
if caseInsensitive {
in := strings.ToUpper(test.expected)
recovered, err = c.decryptSegment(in)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
}
}
}
func TestEncryptSegmentBase32(t *testing.T) {
testEncryptSegment(t, "base32", []EncodingTestCase{
{"", ""}, {"", ""},
{"1", "p0e52nreeaj0a5ea7s64m4j72s"}, {"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"12", "l42g6771hnv3an9cgc8cr2n1ng"}, {"12", "l42g6771hnv3an9cgc8cr2n1ng"},
@@ -118,26 +224,61 @@ func TestEncryptSegment(t *testing.T) {
{"12345678901234", "moq0uqdlqrblrc5pa5u5c7hq9g"}, {"12345678901234", "moq0uqdlqrblrc5pa5u5c7hq9g"},
{"123456789012345", "eeam3li4rnommi3a762h5n7meg"}, {"123456789012345", "eeam3li4rnommi3a762h5n7meg"},
{"1234567890123456", "mijbj0frqf6ms7frcr6bd9h0env53jv96pjaaoirk7forcgpt70g"}, {"1234567890123456", "mijbj0frqf6ms7frcr6bd9h0env53jv96pjaaoirk7forcgpt70g"},
} { }, true)
actual := c.encryptSegment(test.in)
assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
recovered, err := c.decryptSegment(test.expected)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
in := strings.ToUpper(test.expected)
recovered, err = c.decryptSegment(in)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
}
} }
func TestDecryptSegment(t *testing.T) { func TestEncryptSegmentBase64(t *testing.T) {
testEncryptSegment(t, "base64", []EncodingTestCase{
{"", ""},
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"12", "qQUDHOGN_jVdLIMQzYrhvA"},
{"123", "1CxFf2Mti1xIPYlGruDh-A"},
{"1234", "RL-xOTmsxsG7kuTy2XJUxw"},
{"12345", "3FP_GHoeBJdq0yLgaED8IQ"},
{"123456", "Xc4T1Gqrs3OVYnrE6dpEWQ"},
{"1234567", "uZeEzssOnDWHEOzLqjwpog"},
{"12345678", "8noiTP5WkkbEuijsPhOpxQ"},
{"123456789", "GeNxgLA0wiaGAKU3U7qL4Q"},
{"1234567890", "x1DUhdmqoVWYVBLD3dha-A"},
{"12345678901", "iEyP_3BZR6vvv_2WM6NbZw"},
{"123456789012", "4OPGvS4SZdjvS568APUaFw"},
{"1234567890123", "Y8c5Wr8OhYYUo7fPwdojdg"},
{"12345678901234", "tjQPabXW112wuVF8Vh46TA"},
{"123456789012345", "c5Vh1kTd8WtIajmFEtz2dA"},
{"1234567890123456", "tKa5gfvTzW4d-2bMtqYgdf5Rz-k2ZqViW6HfjbIZ6cE"},
}, false)
}
func TestEncryptSegmentBase32768(t *testing.T) {
testEncryptSegment(t, "base32768", []EncodingTestCase{
{"", ""},
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"12", "竢朧䉱虃光塬䟛⣡蓟"},
{"123", "遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
{"1234", "䢟銮䵵狌㐜燳谒颴詟"},
{"12345", "钉Ꞇ㖃蚩憶狫朰杜㜿"},
{"123456", "啇ᚵⵕ憗䋫➫➓肤卟"},
{"1234567", "茫螓翁連劘樓㶔抉矟"},
{"12345678", "龝☳䘊辄岅較络㧩襟"},
{"123456789", "ⲱ苀㱆犂媐Ꮤ锇惫靟"},
{"1234567890", "計宁憕偵匢皫╛纺ꌟ"},
{"12345678901", "檆䨿鑫㪺藝ꡖ勇䦛婟"},
{"123456789012", "雑頏䰂䲝淚哚鹡魺⪟"},
{"1234567890123", "塃璶繁躸圅㔟䗃肃懟"},
{"12345678901234", "腺ᕚ崚鏕鏥讥鼌䑺䲿"},
{"123456789012345", "怪绕滻蕶肣但⠥荖惟"},
{"1234567890123456", "肳哀旚挶靏鏻㾭䱠慟㪳ꏆ賊兲铧敻塹魀ʟ"},
}, false)
}
func TestDecryptSegmentBase32(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors // We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 3328) longName := make([]byte, 3328)
for i := range longName { for i := range longName {
longName[i] = 'a' longName[i] = 'a'
} }
c, _ := newCipher(NameEncryptionStandard, "", "", true) enc, _ := NewNameEncoding("base32")
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range []struct { for _, test := range []struct {
in string in string
expectedErr error expectedErr error
@@ -145,118 +286,371 @@ func TestDecryptSegment(t *testing.T) {
{"64=", ErrorBadBase32Encoding}, {"64=", ErrorBadBase32Encoding},
{"!", base32.CorruptInputError(0)}, {"!", base32.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode}, {string(longName), ErrorTooLongAfterDecode},
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize}, {enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize}, {enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong}, {enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
} { } {
actual, actualErr := c.decryptSegment(test.in) actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr)) assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
} }
} }
func TestEncryptFileName(t *testing.T) { func TestDecryptSegmentBase64(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 2816)
for i := range longName {
longName[i] = 'a'
}
enc, _ := NewNameEncoding("base64")
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range []struct {
in string
expectedErr error
}{
{"6H=", base64.CorruptInputError(2)},
{"!", base64.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
} {
actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func TestDecryptSegmentBase32768(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := strings.Repeat("怪", 1280)
enc, _ := NewNameEncoding("base32768")
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range []struct {
in string
expectedErr error
}{
{"怪=", base32768.CorruptInputError(1)},
{"!", base32768.CorruptInputError(0)},
{longName, ErrorTooLongAfterDecode},
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
} {
actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func testStandardEncryptFileName(t *testing.T, encoding string, testCasesEncryptDir []EncodingTestCase, testCasesNoEncryptDir []EncodingTestCase) {
// First standard mode // First standard mode
c, _ := newCipher(NameEncryptionStandard, "", "", true) enc, _ := NewNameEncoding(encoding)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1")) c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12")) for _, test := range testCasesEncryptDir {
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123")) assert.Equal(t, test.expected, c.EncryptFileName(test.in))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123")) }
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
// Standard mode with directory name encryption off // Standard mode with directory name encryption off
c, _ = newCipher(NameEncryptionStandard, "", "", false) c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1")) for _, test := range testCasesNoEncryptDir {
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12")) assert.Equal(t, test.expected, c.EncryptFileName(test.in))
assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123")) }
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123")) }
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
// Now off mode func TestStandardEncryptFileNameBase32(t *testing.T) {
c, _ = newCipher(NameEncryptionOff, "", "", true) testStandardEncryptFileName(t, "base32", []EncodingTestCase{
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
}, []EncodingTestCase{
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"1/12", "1/l42g6771hnv3an9cgc8cr2n1ng"},
{"1/12/123", "1/12/qgm4avr35m5loi1th53ato71v0"},
{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
})
}
func TestStandardEncryptFileNameBase64(t *testing.T) {
testStandardEncryptFileName(t, "base64", []EncodingTestCase{
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
}, []EncodingTestCase{
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"1/12", "1/qQUDHOGN_jVdLIMQzYrhvA"},
{"1/12/123", "1/12/1CxFf2Mti1xIPYlGruDh-A"},
{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "1/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
})
}
func TestStandardEncryptFileNameBase32768(t *testing.T) {
testStandardEncryptFileName(t, "base32768", []EncodingTestCase{
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
}, []EncodingTestCase{
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"1/12", "1/竢朧䉱虃光塬䟛⣡蓟"},
{"1/12/123", "1/12/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "1/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
})
}
func TestNonStandardEncryptFileName(t *testing.T) {
// Off mode
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123")) assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
// Obfuscation mode // Obfuscation mode
c, _ = newCipher(NameEncryptionObfuscated, "", "", true) c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello")) assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123")) assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt")) assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1")) assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0")) assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
// Obfuscation mode with directory name encryption off // Obfuscation mode with directory name encryption off
c, _ = newCipher(NameEncryptionObfuscated, "", "", false) c, _ = newCipher(NameEncryptionObfuscated, "", "", false, nil)
assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello")) assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123")) assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1")) assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0")) assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
} }
-func TestDecryptFileName(t *testing.T) {
-	for _, test := range []struct {
-		mode           NameEncryptionMode
-		dirNameEncrypt bool
-		in             string
-		expected       string
-		expectedErr    error
-	}{
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
-		{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
-		{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
-		{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
-		{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
-		{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
-		{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
-		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
-		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
-		{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
-		{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
-		{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
-		{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
-		{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
-		{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
-	} {
-		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
-		actual, actualErr := c.DecryptFileName(test.in)
-		what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
-		assert.Equal(t, test.expected, actual, what)
-		assert.Equal(t, test.expectedErr, actualErr, what)
-	}
-}
+func testStandardDecryptFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
+	enc, _ := NewNameEncoding(encoding)
+	for _, test := range testCases {
+		// Test when dirNameEncrypt=true
+		c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
+		actual, actualErr := c.DecryptFileName(test.in)
+		assert.NoError(t, actualErr)
+		assert.Equal(t, test.expected, actual)
+		if caseInsensitive {
+			c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
+			actual, actualErr := c.DecryptFileName(strings.ToUpper(test.in))
+			assert.NoError(t, actualErr)
+			assert.Equal(t, test.expected, actual)
+		}
+		// Add a character should raise ErrorNotAMultipleOfBlocksize
+		actual, actualErr = c.DecryptFileName(enc.EncodeToString([]byte("1")) + test.in)
+		assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
+		assert.Equal(t, "", actual)
+		// Test when dirNameEncrypt=false
+		noDirEncryptIn := test.in
+		if strings.LastIndex(test.expected, "/") != -1 {
+			noDirEncryptIn = test.expected[:strings.LastIndex(test.expected, "/")] + test.in[strings.LastIndex(test.in, "/"):]
+		}
+		c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
+		actual, actualErr = c.DecryptFileName(noDirEncryptIn)
+		assert.NoError(t, actualErr)
+		assert.Equal(t, test.expected, actual)
+	}
+}
func TestStandardDecryptFileNameBase32(t *testing.T) {
testStandardDecryptFileName(t, "base32", []EncodingTestCase{
{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
}, true)
}
func TestStandardDecryptFileNameBase64(t *testing.T) {
testStandardDecryptFileName(t, "base64", []EncodingTestCase{
{"yBxRX25ypgUVyj8MSxJnFw", "1"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
}, false)
}
func TestStandardDecryptFileNameBase32768(t *testing.T) {
testStandardDecryptFileName(t, "base32768", []EncodingTestCase{
{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
}, false)
}
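
(For orientation, a minimal sketch of the round-trip property these table-driven tests rely on. It assumes the package-internal NewNameEncoding and newCipher signatures shown in this diff; the helper name is hypothetical.)

func testRoundTrip(t *testing.T, encoding, name string) {
	enc, err := NewNameEncoding(encoding) // "base32", "base64" or "base32768"
	require.NoError(t, err)
	c, err := newCipher(NameEncryptionStandard, "", "", true, enc)
	require.NoError(t, err)
	// Decrypting an encrypted name must give the original name back.
	got, err := c.DecryptFileName(c.EncryptFileName(name))
	require.NoError(t, err)
	assert.Equal(t, name, got)
}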
func TestNonStandardDecryptFileName(t *testing.T) {
for _, encoding := range []string{"base32", "base64", "base32768"} {
enc, _ := NewNameEncoding(encoding)
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
in string
expected string
expectedErr error
}{
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
assert.Equal(t, test.expectedErr, actualErr, what)
}
	}
}
func TestEncDecMatches(t *testing.T) {
+	for _, encoding := range []string{"base32", "base64", "base32768"} {
+		enc, _ := NewNameEncoding(encoding)
	for _, test := range []struct {
		mode NameEncryptionMode
		in   string
	}{
		{NameEncryptionStandard, "1/2/3/4"},
		{NameEncryptionOff, "1/2/3/4"},
		{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
		{NameEncryptionObfuscated, "Avatar The Last Airbender"},
	} {
-		c, _ := newCipher(test.mode, "", "", true)
+		c, _ := newCipher(test.mode, "", "", true, enc)
		out, err := c.DecryptFileName(c.EncryptFileName(test.in))
		what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
		assert.Equal(t, out, test.in, what)
		assert.Equal(t, err, nil, what)
	}
+	}
}
-func TestEncryptDirName(t *testing.T) {
-	// First standard mode
-	c, _ := newCipher(NameEncryptionStandard, "", "", true)
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptDirName("1"))
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptDirName("1/12"))
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptDirName("1/12/123"))
-	// Standard mode with dir name encryption off
-	c, _ = newCipher(NameEncryptionStandard, "", "", false)
-	assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
-	assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
-	// Now off mode
-	c, _ = newCipher(NameEncryptionOff, "", "", true)
-	assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
-}
+func testStandardEncryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase) {
+	enc, _ := NewNameEncoding(encoding)
+	c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
+	// First standard mode
+	for _, test := range testCases {
+		assert.Equal(t, test.expected, c.EncryptDirName(test.in))
+	}
+}
-func TestDecryptDirName(t *testing.T) {
+func TestStandardEncryptDirNameBase32(t *testing.T) {
testStandardEncryptDirName(t, "base32", []EncodingTestCase{
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
})
}
func TestStandardEncryptDirNameBase64(t *testing.T) {
testStandardEncryptDirName(t, "base64", []EncodingTestCase{
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
})
}
func TestStandardEncryptDirNameBase32768(t *testing.T) {
testStandardEncryptDirName(t, "base32768", []EncodingTestCase{
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
})
}
func TestNonStandardEncryptDirName(t *testing.T) {
for _, encoding := range []string{"base32", "base64", "base32768"} {
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", false, enc)
assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
// Now off mode
c, _ = newCipher(NameEncryptionOff, "", "", true, enc)
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
}
}
func testStandardDecryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
enc, _ := NewNameEncoding(encoding)
for _, test := range testCases {
// Test dirNameEncrypt=true
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
actual, actualErr := c.DecryptDirName(test.in)
assert.Equal(t, test.expected, actual)
assert.NoError(t, actualErr)
if caseInsensitive {
actual, actualErr := c.DecryptDirName(strings.ToUpper(test.in))
assert.Equal(t, actual, test.expected)
assert.NoError(t, actualErr)
}
actual, actualErr = c.DecryptDirName(enc.EncodeToString([]byte("1")) + test.in)
assert.Equal(t, "", actual)
assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
// Test dirNameEncrypt=false
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
actual, actualErr = c.DecryptDirName(test.in)
assert.Equal(t, test.in, actual)
assert.NoError(t, actualErr)
actual, actualErr = c.DecryptDirName(test.expected)
assert.Equal(t, test.expected, actual)
assert.NoError(t, actualErr)
// Test dirNameEncrypt=false
}
}
/*
enc, _ := NewNameEncoding(encoding)
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
in string
expected string
expectedErr error
}{
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
actual, actualErr := c.DecryptDirName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
assert.Equal(t, test.expectedErr, actualErr, what)
}
*/
func TestStandardDecryptDirNameBase32(t *testing.T) {
testStandardDecryptDirName(t, "base32", []EncodingTestCase{
{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
}, true)
}
func TestStandardDecryptDirNameBase64(t *testing.T) {
testStandardDecryptDirName(t, "base64", []EncodingTestCase{
{"yBxRX25ypgUVyj8MSxJnFw", "1"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
}, false)
}
func TestStandardDecryptDirNameBase32768(t *testing.T) {
testStandardDecryptDirName(t, "base32768", []EncodingTestCase{
{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
}, false)
}
func TestNonStandardDecryptDirName(t *testing.T) {
@@ -264,18 +658,11 @@ func TestDecryptDirName(t *testing.T) {
	for _, test := range []struct {
		mode           NameEncryptionMode
		dirNameEncrypt bool
		in             string
		expected       string
		expectedErr    error
	}{
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
-		{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
-		{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
-		{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
		{NameEncryptionOff, true, "1/12/123.bin", "1/12/123.bin", nil},
		{NameEncryptionOff, true, "1/12/123", "1/12/123", nil},
		{NameEncryptionOff, true, ".bin", ".bin", nil},
	} {
-		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
+		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, nil)
		actual, actualErr := c.DecryptDirName(test.in)
		what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
		assert.Equal(t, test.expected, actual, what)
@@ -284,7 +671,7 @@ func TestDecryptDirName(t *testing.T) {
}
func TestEncryptedSize(t *testing.T) {
-	c, _ := newCipher(NameEncryptionStandard, "", "", true)
+	c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
	for _, test := range []struct {
		in       int64
		expected int64
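
(The expected values are elided by this hunk, but as a sketch of the size arithmetic the test exercises, assuming the overhead described in the rclone crypt docs rather than anything shown here: a 32 byte header plus 16 bytes of authenticator per 64 KiB block.)

func encryptedSize(size int64) int64 {
	const blockSize = 64 * 1024
	blocks, residue := size/blockSize, size%blockSize
	encrypted := int64(32) + blocks*(blockSize+16) // header + whole blocks
	if residue != 0 {
		encrypted += residue + 16 // final partial block carries a tag too
	}
	return encrypted
}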
@@ -308,7 +695,7 @@ func TestEncryptedSize(t *testing.T) {
func TestDecryptedSize(t *testing.T) {
	// Test the errors since we tested the reverse above
-	c, _ := newCipher(NameEncryptionStandard, "", "", true)
+	c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
	for _, test := range []struct {
		in          int64
		expectedErr error
@@ -637,7 +1024,7 @@ func (r *randomSource) Read(p []byte) (n int, err error) {
func (r *randomSource) Write(p []byte) (n int, err error) {
	for i := range p {
		if p[i] != r.next() {
-			return 0, errors.Errorf("Error in stream at %d", r.counter)
+			return 0, fmt.Errorf("Error in stream at %d", r.counter)
		}
	}
	return len(p), nil
@@ -679,7 +1066,7 @@ func (z *zeroes) Read(p []byte) (n int, err error) {
// Test encrypt decrypt with different buffer sizes
func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
	assert.NoError(t, err)
	c.cryptoRand = &zeroes{} // zero out the nonce
	buf := make([]byte, bufSize)
@@ -749,7 +1136,7 @@ func TestEncryptData(t *testing.T) {
		{[]byte{1}, file1},
		{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, file16},
	} {
-		c, err := newCipher(NameEncryptionStandard, "", "", true)
+		c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
		assert.NoError(t, err)
		c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
@@ -772,7 +1159,7 @@ func TestEncryptData(t *testing.T) {
}
func TestNewEncrypter(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
	assert.NoError(t, err)
	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
@@ -788,13 +1175,12 @@ func TestNewEncrypter(t *testing.T) {
	fh, err = c.newEncrypter(z, nil)
	assert.Nil(t, fh)
	assert.Error(t, err, "short read of nonce")
}
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
// cause a fatal loop
func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
	assert.NoError(t, err)
	in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
@@ -823,7 +1209,7 @@ func (c *closeDetector) Close() error {
}
func TestNewDecrypter(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
	assert.NoError(t, err)
	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
@@ -866,7 +1252,7 @@ func TestNewDecrypter(t *testing.T) {
// Test the stream returning 0, io.ErrUnexpectedEOF
func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
	assert.NoError(t, err)
	in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
@@ -882,7 +1268,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
}
func TestNewDecrypterSeekLimit(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
	assert.NoError(t, err)
	c.cryptoRand = &zeroes{} // nodge the crypto rand generator
@@ -1088,7 +1474,7 @@ func TestDecrypterCalculateUnderlying(t *testing.T) {
}
func TestDecrypterRead(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
	assert.NoError(t, err)
	// Test truncating the file at each possible point
@@ -1152,7 +1538,7 @@ func TestDecrypterRead(t *testing.T) {
}
func TestDecrypterClose(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
	assert.NoError(t, err)
	cd := newCloseDetector(bytes.NewBuffer(file16))
@@ -1190,7 +1576,7 @@ func TestDecrypterClose(t *testing.T) {
}
func TestPutGetBlock(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
	assert.NoError(t, err)
	block := c.getBlock()
@@ -1201,7 +1587,7 @@ func TestPutGetBlock(t *testing.T) {
}
func TestKey(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
	assert.NoError(t, err)
	// Check zero keys OK


@@ -3,13 +3,13 @@ package crypt
import (
	"context"
+	"errors"
	"fmt"
	"io"
	"path"
	"strings"
	"time"

-	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/cache"
@@ -30,7 +30,7 @@ func init() {
	CommandHelp: commandHelp,
	Options: []fs.Option{{
		Name: "remote",
-		Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+		Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
		Required: true,
	}, {
		Name: "filename_encryption",
@@ -39,13 +39,13 @@ func init() {
	Examples: []fs.OptionExample{
		{
			Value: "standard",
-			Help: "Encrypt the filenames see the docs for the details.",
+			Help: "Encrypt the filenames.\nSee the docs for the details.",
		}, {
			Value: "obfuscate",
			Help: "Very simple filename obfuscation.",
		}, {
			Value: "off",
-			Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
+			Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
		},
	},
}, {
@@ -71,7 +71,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
	Required: true,
}, {
	Name: "password2",
-	Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
+	Help: "Password or pass phrase for salt.\n\nOptional but recommended.\nShould be different to the previous password.",
	IsPassword: true,
}, {
	Name: "server_side_across_configs",
@@ -116,6 +116,29 @@ names, or for debugging purposes.`,
		Help: "Encrypt file data.",
	},
},
}, {
Name: "filename_encoding",
	Help: `How to encode the encrypted filename to text string.

This option could help with shortening the encrypted filename. The
suitable option would depend on the way your remote counts the filename
length and if it's case sensitive.`,
Default: "base32",
Examples: []fs.OptionExample{
{
Value: "base32",
Help: "Encode using base32. Suitable for all remote.",
},
{
Value: "base64",
Help: "Encode using base64. Suitable for case sensitive remote.",
},
{
Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
},
},
Advanced: true,
	}},
})
}
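
(To make the length trade-off above concrete, a stdlib-only sketch of how many characters each encoding needs for one 16 byte encrypted name block. The base32 variant here is only for the length arithmetic; base32768 is not in the standard library, so its figure is quoted from its ~15 bits per UTF-16 code unit design.)

package main

import (
	"encoding/base32"
	"encoding/base64"
	"fmt"
)

func main() {
	block := make([]byte, 16) // one 16 byte encrypted name block
	b32 := base32.HexEncoding.WithPadding(base32.NoPadding)
	b64 := base64.RawURLEncoding
	fmt.Println(len(b32.EncodeToString(block))) // 26 characters (~5 bits/char)
	fmt.Println(len(b64.EncodeToString(block))) // 22 characters (~6 bits/char)
	// base32768 packs ~15 bits per UTF-16 code unit, so the same block
	// needs only 9 characters on remotes that count UTF-16 length.
}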
@@ -131,18 +154,22 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
	}
	password, err := obscure.Reveal(opt.Password)
	if err != nil {
-		return nil, errors.Wrap(err, "failed to decrypt password")
+		return nil, fmt.Errorf("failed to decrypt password: %w", err)
	}
	var salt string
	if opt.Password2 != "" {
		salt, err = obscure.Reveal(opt.Password2)
		if err != nil {
-			return nil, errors.Wrap(err, "failed to decrypt password2")
+			return nil, fmt.Errorf("failed to decrypt password2: %w", err)
		}
	}
-	cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
+	enc, err := NewNameEncoding(opt.FilenameEncoding)
	if err != nil {
-		return nil, errors.Wrap(err, "failed to make cipher")
+		return nil, err
+	}
+	cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption, enc)
+	if err != nil {
+		return nil, fmt.Errorf("failed to make cipher: %w", err)
	}
	return cipher, nil
}
@@ -192,7 +219,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
		}
	}
	if err != fs.ErrorIsFile && err != nil {
-		return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
+		return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
	}
	f := &Fs{
		Fs: wrappedFs,
@@ -229,6 +256,7 @@ type Options struct {
	Password2               string `config:"password2"`
	ServerSideAcrossConfigs bool   `config:"server_side_across_configs"`
	ShowMapping             bool   `config:"show_mapping"`
+	FilenameEncoding        string `config:"filename_encoding"`
}

// Fs represents a wrapped fs.Fs
@@ -300,7 +328,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
	case fs.Directory:
		f.addDir(ctx, &newEntries, x)
	default:
-		return nil, errors.Errorf("Unknown object type %T", entry)
+		return nil, fmt.Errorf("Unknown object type %T", entry)
	}
}
return newEntries, nil
@@ -363,7 +391,11 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
	if f.opt.NoDataEncryption {
-		return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
+		o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
+		if err == nil && o != nil {
+			o = f.newObject(o)
+		}
+		return o, err
	}
	// Encrypt the data into wrappedIn
@@ -402,7 +434,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
	var dstHash string
	dstHash, err = o.Hash(ctx, ht)
	if err != nil {
-		return nil, errors.Wrap(err, "failed to read destination hash")
+		return nil, fmt.Errorf("failed to read destination hash: %w", err)
	}
	if srcHash != "" && dstHash != "" {
		if srcHash != dstHash {
@@ -411,7 +443,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
			if err != nil {
				fs.Errorf(o, "Failed to remove corrupted object: %v", err)
			}
-			return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
+			return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
		}
		fs.Debugf(src, "%v = %s OK", ht, srcHash)
@@ -612,24 +644,24 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
	// Open the src for input
	in, err := src.Open(ctx)
	if err != nil {
-		return "", errors.Wrap(err, "failed to open src")
+		return "", fmt.Errorf("failed to open src: %w", err)
	}
	defer fs.CheckClose(in, &err)

	// Now encrypt the src with the nonce
	out, err := f.cipher.newEncrypter(in, &nonce)
	if err != nil {
-		return "", errors.Wrap(err, "failed to make encrypter")
+		return "", fmt.Errorf("failed to make encrypter: %w", err)
	}

	// pipe into hash
	m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
	if err != nil {
-		return "", errors.Wrap(err, "failed to make hasher")
+		return "", fmt.Errorf("failed to make hasher: %w", err)
	}
	_, err = io.Copy(m, out)
	if err != nil {
-		return "", errors.Wrap(err, "failed to hash data")
+		return "", fmt.Errorf("failed to hash data: %w", err)
	}
	return m.Sums()[hashType], nil
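
(The shape of computeHashWithNonce is worth noting: the source is streamed through the encrypter straight into the hasher, so nothing is buffered in full. A stdlib-only sketch of the same streaming pattern, with md5 standing in for rclone's MultiHasher and a plain reader standing in for the encrypter:)

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"strings"
)

func main() {
	var src io.Reader = strings.NewReader("ciphertext stream") // imagine the encrypter here
	h := md5.New()
	if _, err := io.Copy(h, src); err != nil { // stream, don't buffer
		fmt.Println("failed to hash data:", err)
		return
	}
	fmt.Printf("%x\n", h.Sum(nil))
}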
@@ -648,12 +680,12 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
	// use a limited read so we only read the header
	in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
	if err != nil {
-		return "", errors.Wrap(err, "failed to open object to read nonce")
+		return "", fmt.Errorf("failed to open object to read nonce: %w", err)
	}
	d, err := f.cipher.newDecrypter(in)
	if err != nil {
		_ = in.Close()
-		return "", errors.Wrap(err, "failed to open object to read nonce")
+		return "", fmt.Errorf("failed to open object to read nonce: %w", err)
	}
	nonce := d.nonce
	// fs.Debugf(o, "Read nonce % 2x", nonce)
@@ -672,7 +704,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
	// Close d (and hence in) once we have read the nonce
	err = d.Close()
	if err != nil {
-		return "", errors.Wrap(err, "failed to close nonce read")
+		return "", fmt.Errorf("failed to close nonce read: %w", err)
	}
	return f.computeHashWithNonce(ctx, nonce, src, hashType)
@@ -791,7 +823,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
	for _, encryptedFileName := range arg {
		fileName, err := f.DecryptFileName(encryptedFileName)
		if err != nil {
-			return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
+			return out, fmt.Errorf("failed to decrypt: %s: %w", encryptedFileName, err)
		}
		out = append(out, fileName)
	}
@@ -995,6 +1027,9 @@ func (o *ObjectInfo) Size() int64 {
	if size < 0 {
		return size
	}
+	if o.f.opt.NoDataEncryption {
+		return size
+	}
	return o.f.cipher.EncryptedSize(size)
}
@@ -1014,6 +1049,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
	}
	// if this is wrapping a local object then we work out the hash
	if srcObj.Fs().Features().IsLocal {
+		if o.f.opt.NoDataEncryption {
+			// If no encryption, just return the hash of the underlying object
+			return srcObj.Hash(ctx, hash)
+		}
		// Read the data and encrypt it to calculate the hash
		fs.Debugf(o, "Computing %v hash of encrypted source", hash)
		return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash)


@@ -77,7 +77,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
	enc, err := f.cipher.newEncrypter(inBuf, nil)
	require.NoError(t, err)
	nonce := enc.nonce // read the nonce at the start
-	_, err = io.Copy(&outBuf, enc)
+	if f.opt.NoDataEncryption {
+		_, err = outBuf.WriteString(contents)
+	} else {
+		_, err = io.Copy(&outBuf, enc)
+	}
	require.NoError(t, err)

	var oi fs.ObjectInfo = obj
@@ -96,7 +100,12 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
	assert.NotEqual(t, path, src.Remote())

	// Test ObjectInfo.Hash
-	wantHash := md5.Sum(outBuf.Bytes())
+	var wantHash [md5.Size]byte
+	if f.opt.NoDataEncryption {
+		wantHash = md5.Sum([]byte(contents))
+	} else {
+		wantHash = md5.Sum(outBuf.Bytes())
+	}
	gotHash, err := src.Hash(ctx, hash.MD5)
	require.NoError(t, err)
	assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash)


@@ -29,7 +29,7 @@ func TestIntegration(t *testing.T) {
}

// TestStandard runs integration tests against the remote
-func TestStandard(t *testing.T) {
+func TestStandardBase32(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
@@ -46,6 +46,51 @@ func TestStandard(t *testing.T) {
		},
		UnimplementableFsMethods:     []string{"OpenWriterAt"},
		UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
func TestStandardBase64(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
func TestStandardBase32768(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
	})
}
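
A usage note: assuming the repository's usual Go test conventions, the three standard-mode variants above can be run on their own with something like `go test -v -run 'TestStandardBase(32|64|32768)$' ./backend/crypt`; each one spins up a crypt remote over a temporary local directory, differing only in the filename_encoding extra-config item.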
@@ -67,6 +112,7 @@ func TestOff(t *testing.T) {
		},
		UnimplementableFsMethods:     []string{"OpenWriterAt"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})
}
@@ -89,6 +135,7 @@ func TestObfuscate(t *testing.T) {
		SkipBadWindowsCharacters:     true,
		UnimplementableFsMethods:     []string{"OpenWriterAt"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})
}
@@ -112,5 +159,6 @@ func TestNoDataObfuscate(t *testing.T) {
		SkipBadWindowsCharacters:     true,
		UnimplementableFsMethods:     []string{"OpenWriterAt"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})
}


@@ -4,7 +4,7 @@
// buffers which are a multiple of an underlying crypto block size.
package pkcs7

-import "github.com/pkg/errors"
+import "errors"

// Errors Unpad can return
var (

backend/drive/drive.go (file mode changed: executable → normal)

@@ -11,6 +11,7 @@ import (
"bytes" "bytes"
"context" "context"
"crypto/tls" "crypto/tls"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@@ -25,13 +26,13 @@ import (
"text/template" "text/template"
"time" "time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
@@ -67,7 +68,7 @@ const (
defaultScope = "drive" defaultScope = "drive"
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two. // chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum. // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = 256 * fs.Kibi minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
defaultChunkSize = 8 * fs.Mebi defaultChunkSize = 8 * fs.Mebi
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks" partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
listRGrouping = 50 // number of IDs to search at once when using ListR listRGrouping = 50 // number of IDs to search at once when using ListR
@@ -187,7 +188,7 @@ func init() {
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
-		return nil, errors.Wrap(err, "couldn't parse config into struct")
+		return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
	}

	switch config.State {
@@ -210,15 +211,22 @@ func init() {
if opt.TeamDriveID == "" { if opt.TeamDriveID == "" {
return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", "Configure this as a Shared Drive (Team Drive)?\n") return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", "Configure this as a Shared Drive (Team Drive)?\n")
} }
return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", fmt.Sprintf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID)) return fs.ConfigConfirm("teamdrive_change", false, "config_change_team_drive", fmt.Sprintf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID))
case "teamdrive_ok": case "teamdrive_ok":
if config.Result == "false" { if config.Result == "false" {
m.Set("team_drive", "") m.Set("team_drive", "")
return nil, nil return nil, nil
} }
return fs.ConfigGoto("teamdrive_config")
case "teamdrive_change":
if config.Result == "false" {
return nil, nil
}
return fs.ConfigGoto("teamdrive_config")
case "teamdrive_config":
f, err := newFs(ctx, name, "", m) f, err := newFs(ctx, name, "", m)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to make Fs to list Shared Drives") return nil, fmt.Errorf("failed to make Fs to list Shared Drives: %w", err)
} }
teamDrives, err := f.listTeamDrives(ctx) teamDrives, err := f.listTeamDrives(ctx)
if err != nil { if err != nil {
@@ -262,7 +270,7 @@ func init() {
	}},
}, {
	Name: "root_folder_id",
-	Help: `ID of the root folder
+	Help: `ID of the root folder.
Leave blank normally.

Fill in to access "Computers" folders (see docs), or for rclone to use
@@ -270,15 +278,15 @@ a non root folder as its starting point.
`,
}, {
	Name: "service_account_file",
-	Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
+	Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
	Name: "service_account_credentials",
-	Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
+	Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
	Hide:     fs.OptionHideConfigurator,
	Advanced: true,
}, {
	Name: "team_drive",
-	Help: "ID of the Shared Drive (Team Drive)",
+	Help: "ID of the Shared Drive (Team Drive).",
	Hide:     fs.OptionHideConfigurator,
	Advanced: true,
}, {
@@ -289,12 +297,12 @@ a non root folder as its starting point.
}, {
	Name:    "use_trash",
	Default: true,
-	Help:    "Send files to the trash instead of deleting permanently.\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
+	Help:    "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
	Advanced: true,
}, {
	Name:    "skip_gdocs",
	Default: false,
-	Help:    "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
+	Help:    "Skip google documents in all listings.\n\nIf given, gdocs practically become invisible to rclone.",
	Advanced: true,
}, {
	Name: "skip_checksum_gphotos",
@@ -327,7 +335,7 @@ commands (copy, sync, etc.), and with all other commands too.`,
}, {
	Name:    "trashed_only",
	Default: false,
-	Help:    "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
+	Help:    "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
	Advanced: true,
}, {
	Name: "starred_only",
@@ -337,7 +345,7 @@ commands (copy, sync, etc.), and with all other commands too.`,
}, {
	Name:    "formats",
	Default: "",
-	Help:    "Deprecated: see export_formats",
+	Help:    "Deprecated: See export_formats.",
	Advanced: true,
	Hide:     fs.OptionHideConfigurator,
}, {
@@ -353,12 +361,12 @@ commands (copy, sync, etc.), and with all other commands too.`,
}, {
	Name:    "allow_import_name_change",
	Default: false,
-	Help:    "Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.",
+	Help:    "Allow the filetype to change when uploading Google docs.\n\nE.g. file.doc to file.docx. This will confuse sync and reupload every time.",
	Advanced: true,
}, {
	Name:    "use_created_date",
	Default: false,
-	Help: `Use file created date instead of modified date.,
+	Help: `Use file created date instead of modified date.

Useful when downloading data and you want the creation date used in
place of the last modified date.
@@ -392,7 +400,7 @@ date is used.`,
}, {
	Name:    "list_chunk",
	Default: 1000,
-	Help:    "Size of listing chunk 100-1000. 0 to disable.",
+	Help:    "Size of listing chunk 100-1000, 0 to disable.",
	Advanced: true,
}, {
	Name: "impersonate",
@@ -402,17 +410,19 @@ date is used.`,
}, {
	Name:    "alternate_export",
	Default: false,
-	Help:    "Deprecated: no longer needed",
+	Help:    "Deprecated: No longer needed.",
	Hide: fs.OptionHideBoth,
}, {
	Name:    "upload_cutoff",
	Default: defaultChunkSize,
-	Help:    "Cutoff for switching to chunked upload",
+	Help:    "Cutoff for switching to chunked upload.",
	Advanced: true,
}, {
	Name:    "chunk_size",
	Default: defaultChunkSize,
-	Help: `Upload chunk size. Must a power of 2 >= 256k.
+	Help: `Upload chunk size.
+
+Must a power of 2 >= 256k.

Making this larger will improve performance, but note that each chunk
is buffered in memory one per transfer.
@@ -482,7 +492,7 @@ configurations.`,
}, {
	Name:    "disable_http2",
	Default: true,
-	Help: `Disable drive using http2
+	Help: `Disable drive using http2.

There is currently an unsolved issue with the google drive backend and
HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
}, { }, {
Name: "stop_on_upload_limit", Name: "stop_on_upload_limit",
Default: false, Default: false,
Help: `Make upload limit errors be fatal Help: `Make upload limit errors be fatal.
At the time of writing it is only possible to upload 750 GiB of data to At the time of writing it is only possible to upload 750 GiB of data to
Google Drive a day (this is an undocumented limit). When this limit is Google Drive a day (this is an undocumented limit). When this limit is
@@ -513,7 +523,7 @@ See: https://github.com/rclone/rclone/issues/3857
}, { }, {
Name: "stop_on_download_limit", Name: "stop_on_download_limit",
Default: false, Default: false,
Help: `Make download limit errors be fatal Help: `Make download limit errors be fatal.
At the time of writing it is only possible to download 10 TiB of data from At the time of writing it is only possible to download 10 TiB of data from
Google Drive a day (this is an undocumented limit). When this limit is Google Drive a day (this is an undocumented limit). When this limit is
@@ -527,7 +537,7 @@ Google don't document so it may break in the future.
	Advanced: true,
}, {
	Name: "skip_shortcuts",
-	Help: `If set skip shortcut files
+	Help: `If set skip shortcut files.

Normally rclone dereferences shortcut files making them appear as if
they are the original file (see [the shortcuts section](#shortcuts)).
@@ -609,6 +619,7 @@ type Fs struct {
	client       *http.Client       // authorized client
	rootFolderID string             // the id of the root folder
	dirCache     *dircache.DirCache // Map of directory path to directory id
+	lastQuery    string             // Last query string to check in unit tests
	pacer            *fs.Pacer // To pace the API calls
	exportExtensions []string  // preferred extensions to download docs
	importMimeTypes  []string  // MIME types to convert to docs
@@ -744,7 +755,7 @@ func (f *Fs) getFile(ctx context.Context, ID string, fields googleapi.Field) (in
func (f *Fs) getRootID(ctx context.Context) (string, error) {
	info, err := f.getFile(ctx, "root", "id")
	if err != nil {
-		return "", errors.Wrap(err, "couldn't find root directory ID")
+		return "", fmt.Errorf("couldn't find root directory ID: %w", err)
	}
	return info.Id, nil
}
@@ -822,11 +833,31 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
	if filesOnly {
		query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
	}
-	list := f.svc.Files.List()
-	if len(query) > 0 {
-		list.Q(strings.Join(query, " and "))
-		// fmt.Printf("list Query = %q\n", query)
+	// Constrain query using filter if this remote is a sync/copy/walk source.
+	if fi, use := filter.GetConfig(ctx), filter.GetUseFilter(ctx); fi != nil && use {
+		queryByTime := func(op string, tm time.Time) {
+			if tm.IsZero() {
+				return
+			}
+			// https://developers.google.com/drive/api/v3/ref-search-terms#operators
+			// Query times use RFC 3339 format, default timezone is UTC
+			timeStr := tm.UTC().Format("2006-01-02T15:04:05")
+			term := fmt.Sprintf("(modifiedTime %s '%s' or mimeType = '%s')", op, timeStr, driveFolderType)
+			query = append(query, term)
+		}
+		queryByTime(">=", fi.ModTimeFrom)
+		queryByTime("<=", fi.ModTimeTo)
	}
+	list := f.svc.Files.List()
+	queryString := strings.Join(query, " and ")
+	if queryString != "" {
+		list.Q(queryString)
+		// fs.Debugf(f, "list query: %q", queryString)
+	}
+	f.lastQuery = queryString // for unit tests
	if f.opt.ListChunk > 0 {
		list.PageSize(f.opt.ListChunk)
	}
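
To make the new query concrete: with a time filter whose ModTimeFrom is, say, 2021-11-01 00:00:00 UTC (an illustrative value), queryByTime appends a term along the lines of

	(modifiedTime >= '2021-11-01T00:00:00' or mimeType = 'application/vnd.google-apps.folder')

The "or mimeType" alternative keeps every directory matching regardless of its own modification time, so constraining the listing by time never prunes a folder whose contents might still match the filter.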
@@ -851,7 +882,7 @@ OUTER:
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
-		return false, errors.Wrap(err, "couldn't list directory")
+		return false, fmt.Errorf("couldn't list directory: %w", err)
	}
	if files.IncompleteSearch {
		fs.Errorf(f, "search result INCOMPLETE")
@@ -873,7 +904,7 @@ OUTER:
		}
		item, err = f.resolveShortcut(ctx, item)
		if err != nil {
-			return false, errors.Wrap(err, "list")
+			return false, fmt.Errorf("list: %w", err)
		}
	}
	// Check the case of items is correct since
@@ -934,7 +965,7 @@ func fixMimeType(mimeTypeIn string) string {
		mimeTypeOut = mime.FormatMediaType(mediaType, param)
	}
	if mimeTypeOut == "" {
-		panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
+		panic(fmt.Errorf("unable to fix MIME type %q", mimeTypeIn))
	}
	return mimeTypeOut
}
@@ -969,7 +1000,7 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
	}
	mt := mime.TypeByExtension(extension)
	if mt == "" {
-		return extensions, mimeTypes, errors.Errorf("couldn't find MIME type for extension %q", extension)
+		return extensions, mimeTypes, fmt.Errorf("couldn't find MIME type for extension %q", extension)
	}
	if !containsString(extensions, extension) {
		extensions = append(extensions, extension)
@@ -996,7 +1027,7 @@ func getServiceAccountClient(ctx context.Context, opt *Options, credentialsData
	scopes := driveScopes(opt.Scope)
	conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
	if err != nil {
-		return nil, errors.Wrap(err, "error processing credentials")
+		return nil, fmt.Errorf("error processing credentials: %w", err)
	}
	if opt.Impersonate != "" {
		conf.Subject = opt.Impersonate
@@ -1013,19 +1044,19 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
	if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
		loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
		if err != nil {
-			return nil, errors.Wrap(err, "error opening service account credentials file")
+			return nil, fmt.Errorf("error opening service account credentials file: %w", err)
		}
		opt.ServiceAccountCredentials = string(loadedCreds)
	}
	if opt.ServiceAccountCredentials != "" {
		oAuthClient, err = getServiceAccountClient(ctx, opt, []byte(opt.ServiceAccountCredentials))
		if err != nil {
-			return nil, errors.Wrap(err, "failed to create oauth client from service account")
+			return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
		}
	} else {
		oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
		if err != nil {
-			return nil, errors.Wrap(err, "failed to create oauth client")
+			return nil, fmt.Errorf("failed to create oauth client: %w", err)
		}
	}
@@ -1034,10 +1065,10 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if !isPowerOfTwo(int64(cs)) {
-		return errors.Errorf("%v isn't a power of two", cs)
+		return fmt.Errorf("%v isn't a power of two", cs)
	}
	if cs < minChunkSize {
-		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}
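
(isPowerOfTwo itself is not shown in this hunk; the usual bit trick looks like the following, offered as an assumption rather than the repository's exact implementation. A positive x is a power of two exactly when clearing its lowest set bit leaves zero.)

func isPowerOfTwo(x int64) bool {
	return x > 0 && x&(x-1) == 0
}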
@@ -1075,16 +1106,16 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
-		return nil, errors.Wrap(err, "drive: upload cutoff")
+		return nil, fmt.Errorf("drive: upload cutoff: %w", err)
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
-		return nil, errors.Wrap(err, "drive: chunk size")
+		return nil, fmt.Errorf("drive: chunk size: %w", err)
	}
	oAuthClient, err := createOAuthClient(ctx, opt, name, m)
	if err != nil {
-		return nil, errors.Wrap(err, "drive: failed when making oauth client")
+		return nil, fmt.Errorf("drive: failed when making oauth client: %w", err)
	}

	root, err := parseDrivePath(path)
@@ -1118,13 +1149,13 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
	f.client = oAuthClient
	f.svc, err = drive.New(f.client)
	if err != nil {
-		return nil, errors.Wrap(err, "couldn't create Drive client")
+		return nil, fmt.Errorf("couldn't create Drive client: %w", err)
	}
	if f.opt.V2DownloadMinSize >= 0 {
		f.v2Svc, err = drive_v2.New(f.client)
		if err != nil {
-			return nil, errors.Wrap(err, "couldn't create Drive v2 client")
+			return nil, fmt.Errorf("couldn't create Drive v2 client: %w", err)
		}
	}
@@ -1149,7 +1180,8 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
	// otherwise look up the actual root ID
	rootID, err := f.getRootID(ctx)
	if err != nil {
-		if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
+		var gerr *googleapi.Error
+		if errors.As(err, &gerr) && gerr.Code == 404 {
			// 404 means that this scope does not have permission to get the
			// root so just use "root"
			rootID = "root"
@@ -1291,7 +1323,7 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
	t := linkTemplate(exportMimeType)
	if t == nil {
-		return nil, errors.Errorf("unsupported link type %s", exportMimeType)
+		return nil, fmt.Errorf("unsupported link type %s", exportMimeType)
	}
	xdgIcon := _mimeTypeToXDGLinkIcons[info.MimeType]
	if xdgIcon == "" {
@@ -1304,7 +1336,7 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
info.WebViewLink, info.Name, xdgIcon, info.WebViewLink, info.Name, xdgIcon,
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "executing template failed") return nil, fmt.Errorf("executing template failed: %w", err)
} }
baseObject := f.newBaseObject(remote+extension, info) baseObject := f.newBaseObject(remote+extension, info)
@@ -1321,8 +1353,8 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
// //
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil). // When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) { func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
// If item has MD5 sum or a length it is a file stored on drive // If item has MD5 sum it is a file stored on drive
if info.Md5Checksum != "" || info.Size > 0 { if info.Md5Checksum != "" {
return f.newRegularObject(remote, info), nil return f.newRegularObject(remote, info), nil
} }
@@ -1341,11 +1373,11 @@ func (f *Fs) newObjectWithExportInfo(
// will have been resolved so this will do nothing. // will have been resolved so this will do nothing.
info, err = f.resolveShortcut(ctx, info) info, err = f.resolveShortcut(ctx, info)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "new object") return nil, fmt.Errorf("new object: %w", err)
} }
switch { switch {
case info.MimeType == driveFolderType: case info.MimeType == driveFolderType:
return nil, fs.ErrorNotAFile return nil, fs.ErrorIsDir
case info.MimeType == shortcutMimeType: case info.MimeType == shortcutMimeType:
// We can only get here if f.opt.SkipShortcuts is set // We can only get here if f.opt.SkipShortcuts is set
// and not from a listing. This is unlikely. // and not from a listing. This is unlikely.
@@ -1355,8 +1387,8 @@ func (f *Fs) newObjectWithExportInfo(
// Pretend a dangling shortcut is a regular object // Pretend a dangling shortcut is a regular object
// It will error if used, but appear in listings so it can be deleted // It will error if used, but appear in listings so it can be deleted
return f.newRegularObject(remote, info), nil return f.newRegularObject(remote, info), nil
case info.Md5Checksum != "" || info.Size > 0: case info.Md5Checksum != "":
// If item has MD5 sum or a length it is a file stored on drive // If item has MD5 sum it is a file stored on drive
return f.newRegularObject(remote, info), nil return f.newRegularObject(remote, info), nil
case f.opt.SkipGdocs: case f.opt.SkipGdocs:
fs.Debugf(remote, "Skipping google document type %q", info.MimeType) fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
@@ -1984,13 +2016,14 @@ func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *dr
} }
newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields) newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
if err != nil { if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 { var gerr *googleapi.Error
if errors.As(err, &gerr) && gerr.Code == 404 {
// 404 means dangling shortcut, so just return the shortcut with the mime type mangled // 404 means dangling shortcut, so just return the shortcut with the mime type mangled
fs.Logf(nil, "Dangling shortcut %q detected", item.Name) fs.Logf(nil, "Dangling shortcut %q detected", item.Name)
item.MimeType = shortcutMimeTypeDangling item.MimeType = shortcutMimeTypeDangling
return item, nil return item, nil
} }
return nil, errors.Wrap(err, "failed to resolve shortcut") return nil, fmt.Errorf("failed to resolve shortcut: %w", err)
} }
// make sure we use the Name, Parents and Trashed from the original item // make sure we use the Name, Parents and Trashed from the original item
newItem.Name = item.Name newItem.Name = item.Name
@@ -2092,10 +2125,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType) exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
if exportExt == "" { if exportExt == "" {
return nil, errors.Errorf("No export format found for %q", importMimeType) return nil, fmt.Errorf("No export format found for %q", importMimeType)
} }
if exportExt != srcExt && !f.opt.AllowImportNameChange { if exportExt != srcExt && !f.opt.AllowImportNameChange {
return nil, errors.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt) return nil, fmt.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
} }
} }
} }
@@ -2116,7 +2149,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// Don't retry, return a retry error instead // Don't retry, return a retry error instead
err = f.pacer.CallNoRetry(func() (bool, error) { err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Create(createInfo). info, err = f.svc.Files.Create(createInfo).
Media(in, googleapi.ContentType(srcMimeType)). Media(in, googleapi.ContentType(srcMimeType), googleapi.ChunkSize(0)).
Fields(partialFields). Fields(partialFields).
SupportsAllDrives(true). SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever). KeepRevisionForever(f.opt.KeepRevisionForever).
@@ -2163,7 +2196,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return false return false
}) })
if err != nil { if err != nil {
return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir) return fmt.Errorf("MergeDirs list failed on %v: %w", srcDir, err)
} }
// move them into place // move them into place
for _, info := range infos { for _, info := range infos {
@@ -2179,14 +2212,14 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return f.shouldRetry(ctx, err) return f.shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir) return fmt.Errorf("MergeDirs move failed on %q in %v: %w", info.Name, srcDir, err)
} }
} }
// rmdir (into trash) the now empty source directory // rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory") fs.Infof(srcDir, "removing empty directory")
err = f.delete(ctx, srcDir.ID(), true) err = f.delete(ctx, srcDir.ID(), true)
if err != nil { if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir) return fmt.Errorf("MergeDirs move failed to rmdir %q: %w", srcDir, err)
} }
} }
return nil return nil
@@ -2249,7 +2282,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return err return err
} }
if found { if found {
return errors.Errorf("directory not empty") return fmt.Errorf("directory not empty")
} }
} }
if root != "" { if root != "" {
@@ -2427,7 +2460,7 @@ func (f *Fs) cleanupTeamDrive(ctx context.Context, dir string, directoryID strin
return false return false
}) })
if err != nil { if err != nil {
err = errors.Wrap(err, "failed to list directory") err = fmt.Errorf("failed to list directory: %w", err)
r.Errors++ r.Errors++
fs.Errorf(dir, "%v", err) fs.Errorf(dir, "%v", err)
} }
@@ -2471,7 +2504,7 @@ func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
return f.shouldRetry(ctx, err) return f.shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "failed to get Shared Drive info") return fmt.Errorf("failed to get Shared Drive info: %w", err)
} }
fs.Debugf(f, "read info from Shared Drive %q", td.Name) fs.Debugf(f, "read info from Shared Drive %q", td.Name)
return err return err
@@ -2494,7 +2527,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return f.shouldRetry(ctx, err) return f.shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to get Drive storageQuota") return nil, fmt.Errorf("failed to get Drive storageQuota: %w", err)
} }
q := about.StorageQuota q := about.StorageQuota
usage := &fs.Usage{ usage := &fs.Usage{
@@ -2818,7 +2851,7 @@ func (f *Fs) Hashes() hash.Set {
func (f *Fs) changeChunkSize(chunkSizeString string) (err error) { func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64) chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64)
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't convert chunk size to int") return fmt.Errorf("couldn't convert chunk size to int: %w", err)
} }
chunkSize := fs.SizeSuffix(chunkSizeInt) chunkSize := fs.SizeSuffix(chunkSizeInt)
if chunkSize == f.opt.ChunkSize { if chunkSize == f.opt.ChunkSize {
@@ -2855,17 +2888,17 @@ func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err err
f.opt.ServiceAccountCredentials = "" f.opt.ServiceAccountCredentials = ""
oAuthClient, err := createOAuthClient(ctx, &f.opt, f.name, f.m) oAuthClient, err := createOAuthClient(ctx, &f.opt, f.name, f.m)
if err != nil { if err != nil {
return errors.Wrap(err, "drive: failed when making oauth client") return fmt.Errorf("drive: failed when making oauth client: %w", err)
} }
f.client = oAuthClient f.client = oAuthClient
f.svc, err = drive.New(f.client) f.svc, err = drive.New(f.client)
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't create Drive client") return fmt.Errorf("couldn't create Drive client: %w", err)
} }
if f.opt.V2DownloadMinSize >= 0 { if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client) f.v2Svc, err = drive_v2.New(f.client)
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't create Drive v2 client") return fmt.Errorf("couldn't create Drive v2 client: %w", err)
} }
} }
return nil return nil
@@ -2893,13 +2926,13 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
} }
isDir = true isDir = true
} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil { } else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
if err != fs.ErrorNotAFile { if err != fs.ErrorIsDir {
return nil, errors.Wrap(err, "can't find source") return nil, fmt.Errorf("can't find source: %w", err)
} }
// source was a directory // source was a directory
srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false) srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to find source dir") return nil, fmt.Errorf("failed to find source dir: %w", err)
} }
isDir = true isDir = true
} else { } else {
@@ -2913,16 +2946,16 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
if err != fs.ErrorObjectNotFound { if err != fs.ErrorObjectNotFound {
if err == nil { if err == nil {
err = errors.New("existing file") err = errors.New("existing file")
} else if err == fs.ErrorNotAFile { } else if err == fs.ErrorIsDir {
err = errors.New("existing directory") err = errors.New("existing directory")
} }
return nil, errors.Wrap(err, "not overwriting shortcut target") return nil, fmt.Errorf("not overwriting shortcut target: %w", err)
} }
// Create destination shortcut // Create destination shortcut
createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now()) createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
if err != nil { if err != nil {
return nil, errors.Wrap(err, "shortcut destination failed") return nil, fmt.Errorf("shortcut destination failed: %w", err)
} }
createInfo.MimeType = shortcutMimeType createInfo.MimeType = shortcutMimeType
createInfo.ShortcutDetails = &drive.FileShortcutDetails{ createInfo.ShortcutDetails = &drive.FileShortcutDetails{
@@ -2939,7 +2972,7 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
return dstFs.shouldRetry(ctx, err) return dstFs.shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "shortcut creation failed") return nil, fmt.Errorf("shortcut creation failed: %w", err)
} }
if isDir { if isDir {
return nil, nil return nil, nil
@@ -2959,7 +2992,7 @@ func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.Drive, err err
return defaultFs.shouldRetry(ctx, err) return defaultFs.shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return drives, errors.Wrap(err, "listing Team Drives failed") return drives, fmt.Errorf("listing Team Drives failed: %w", err)
} }
drives = append(drives, teamDrives.Drives...) drives = append(drives, teamDrives.Drives...)
if teamDrives.NextPageToken == "" { if teamDrives.NextPageToken == "" {
@@ -3002,7 +3035,7 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
return f.shouldRetry(ctx, err) return f.shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
err = errors.Wrap(err, "failed to restore") err = fmt.Errorf("failed to restore: %w", err)
r.Errors++ r.Errors++
fs.Errorf(remote, "%v", err) fs.Errorf(remote, "%v", err)
} else { } else {
@@ -3019,7 +3052,7 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
return false return false
}) })
if err != nil { if err != nil {
err = errors.Wrap(err, "failed to list directory") err = fmt.Errorf("failed to list directory: %w", err)
r.Errors++ r.Errors++
fs.Errorf(dir, "%v", err) fs.Errorf(dir, "%v", err)
} }
@@ -3043,10 +3076,10 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) { func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
info, err := f.getFile(ctx, id, f.fileFields) info, err := f.getFile(ctx, id, f.fileFields)
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't find id") return fmt.Errorf("couldn't find id: %w", err)
} }
if info.MimeType == driveFolderType { if info.MimeType == driveFolderType {
return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest) return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
} }
info.Name = f.opt.Enc.ToStandardName(info.Name) info.Name = f.opt.Enc.ToStandardName(info.Name)
o, err := f.newObjectWithInfo(ctx, info.Name, info) o, err := f.newObjectWithInfo(ctx, info.Name, info)
@@ -3069,7 +3102,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
} }
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o) _, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
if err != nil { if err != nil {
return errors.Wrap(err, "copy failed") return fmt.Errorf("copy failed: %w", err)
} }
return nil return nil
} }
@@ -3133,7 +3166,7 @@ account.
Usage: Usage:
rclone backend drives drive: rclone backend [-o config] drives drive:
This will return a JSON list of objects like this This will return a JSON list of objects like this
@@ -3150,6 +3183,22 @@ This will return a JSON list of objects like this
} }
] ]
With the -o config parameter it will output the list in a format
suitable for adding to a config file to make aliases for all the
drives found.
[My Drive]
type = alias
remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
[Test Drive]
type = alias
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. This may require manual editing
of the names.
`, `,
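
As a usage example, the -o config output can be appended straight to the config file (the path below assumes rclone's default config location):

    rclone backend -o config drives drive: >> ~/.config/rclone/rclone.conf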
}, { }, {
Name: "untrash", Name: "untrash",
@@ -3252,7 +3301,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if ok { if ok {
targetFs, err := cache.Get(ctx, target) targetFs, err := cache.Get(ctx, target)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't find target") return nil, fmt.Errorf("couldn't find target: %w", err)
} }
dstFs, ok = targetFs.(*Fs) dstFs, ok = targetFs.(*Fs)
if !ok { if !ok {
@@ -3261,7 +3310,21 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
} }
return f.makeShortcut(ctx, arg[0], dstFs, arg[1]) return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
case "drives": case "drives":
return f.listTeamDrives(ctx) drives, err := f.listTeamDrives(ctx)
if err != nil {
return nil, err
}
if _, ok := opt["config"]; ok {
lines := []string{}
for _, drive := range drives {
lines = append(lines, "")
lines = append(lines, fmt.Sprintf("[%s]", drive.Name))
lines = append(lines, fmt.Sprintf("type = alias"))
lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id))
}
return lines, nil
}
return drives, nil
case "untrash": case "untrash":
dir := "" dir := ""
if len(arg) > 0 { if len(arg) > 0 {
@@ -3277,7 +3340,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
arg = arg[2:] arg = arg[2:]
err = f.copyID(ctx, id, dest) err = f.copyID(ctx, id, dest)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "failed copying %q to %q", id, dest) return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
} }
} }
return nil, nil return nil, nil
@@ -3511,11 +3574,11 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
url += "acknowledgeAbuse=true" url += "acknowledgeAbuse=true"
_, res, err = o.httpResponse(ctx, url, "GET", options) _, res, err = o.httpResponse(ctx, url, "GET", options)
} else { } else {
err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file") err = fmt.Errorf("Use the --drive-acknowledge-abuse flag to download this file: %w", err)
} }
} }
if err != nil { if err != nil {
return nil, errors.Wrap(err, "open file failed") return nil, fmt.Errorf("open file failed: %w", err)
} }
} }
return res.Body, nil return res.Body, nil
@@ -3609,7 +3672,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
// Don't retry, return a retry error instead // Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.CallNoRetry(func() (bool, error) {
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo). info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
Media(in, googleapi.ContentType(uploadMimeType)). Media(in, googleapi.ContentType(uploadMimeType), googleapi.ChunkSize(0)).
Fields(partialFields). Fields(partialFields).
SupportsAllDrives(true). SupportsAllDrives(true).
KeepRevisionForever(o.fs.opt.KeepRevisionForever). KeepRevisionForever(o.fs.opt.KeepRevisionForever).
@@ -3679,14 +3742,14 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
} }
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs { if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
return errors.Errorf("can't update google document type without --drive-import-formats") return fmt.Errorf("can't update google document type without --drive-import-formats")
} }
importMimeType = o.fs.findImportFormat(ctx, updateInfo.MimeType) importMimeType = o.fs.findImportFormat(ctx, updateInfo.MimeType)
if importMimeType == "" { if importMimeType == "" {
return errors.Errorf("no import format found for %q", srcMimeType) return fmt.Errorf("no import format found for %q", srcMimeType)
} }
if importMimeType != o.documentMimeType { if importMimeType != o.documentMimeType {
return errors.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType) return fmt.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
} }
updateInfo.MimeType = importMimeType updateInfo.MimeType = importMimeType


@@ -4,6 +4,8 @@ import (
"bytes" "bytes"
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"mime" "mime"
@@ -14,11 +16,12 @@ import (
"testing" "testing"
"time" "time"
"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/random"
@@ -461,6 +464,81 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
}) })
} }
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
opt := &filter.Opt{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)
flt, err := filter.NewFilter(opt)
assert.NoError(t, err)
defCtx := context.Background()
fltCtx := filter.ReplaceConfig(defCtx, flt)
testCtx1 := fltCtx
testCtx2 := filter.SetUseFilter(testCtx1, true)
testCtx3, testCancel := context.WithCancel(testCtx2)
testCtx4 := filter.SetUseFilter(testCtx3, false)
testCancel()
assert.False(t, filter.GetUseFilter(testCtx1))
assert.True(t, filter.GetUseFilter(testCtx2))
assert.True(t, filter.GetUseFilter(testCtx3))
assert.False(t, filter.GetUseFilter(testCtx4))
subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), "agequery-testdir")
subFsResult, err := fs.NewFs(defCtx, subRemote)
require.NoError(t, err)
subFs, isDriveFs := subFsResult.(*Fs)
require.True(t, isDriveFs)
tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir1)
}()
tempFs1, err := fs.NewFs(defCtx, tempDir1)
require.NoError(t, err)
tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir2)
}()
tempFs2, err := fs.NewFs(defCtx, tempDir2)
require.NoError(t, err)
file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
_, _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
// validate sync/copy
const timeQuery = "(modifiedTime >= '"
assert.NoError(t, sync.CopyDir(defCtx, subFs, tempFs1, false))
assert.NotContains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, sync.CopyDir(fltCtx, subFs, tempFs1, false))
assert.Contains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, sync.CopyDir(fltCtx, tempFs2, subFs, false))
assert.Contains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, sync.CopyDir(defCtx, tempFs2, subFs, false))
assert.NotContains(t, subFs.lastQuery, timeQuery)
// validate list/walk
devNull, errOpen := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
require.NoError(t, errOpen)
defer func() {
_ = devNull.Close()
}()
assert.NoError(t, operations.List(defCtx, subFs, devNull))
assert.NotContains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, operations.List(fltCtx, subFs, devNull))
assert.Contains(t, subFs.lastQuery, timeQuery)
}
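
The assertions above pin down the new context plumbing: filter.ReplaceConfig installs a filter on a context, filter.SetUseFilter marks a derived context as filter-aware, and the mark survives further derivation until explicitly overridden. A minimal sketch using only the calls the test exercises (imports of context, fmt and fs/filter assumed):

    flt, _ := filter.NewFilter(&filter.Opt{})
    ctx := filter.ReplaceConfig(context.Background(), flt)
    on := filter.SetUseFilter(ctx, true)  // drive may now add a modifiedTime query
    off := filter.SetUseFilter(on, false) // explicitly switched off again
    fmt.Println(filter.GetUseFilter(on), filter.GetUseFilter(off)) // true false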
func (f *Fs) InternalTest(t *testing.T) { func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests // These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) { t.Run("DocumentImport", func(t *testing.T) {
@@ -478,6 +556,7 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("Shortcuts", f.InternalTestShortcuts) t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash) t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID) t.Run("CopyID", f.InternalTestCopyID)
t.Run("AgeQuery", f.InternalTestAgeQuery)
} }
var _ fstests.InternalTester = (*Fs)(nil) var _ fstests.InternalTester = (*Fs)(nil)


@@ -8,13 +8,13 @@ package dropbox
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"sync" "sync"
"time" "time"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/atexit"
@@ -66,7 +66,7 @@ type batcherResponse struct {
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) { func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout) // fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 { if size > maxBatchSize || size < 0 {
return nil, errors.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size) return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
} }
async := false async := false
@@ -91,7 +91,7 @@ func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.
case "off": case "off":
size = 0 size = 0
default: default:
return nil, errors.Errorf("dropbox: batch mode must be sync|async|off not %q", mode) return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
} }
b := &batcher{ b := &batcher{
@@ -135,7 +135,7 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
return err != nil, err return err != nil, err
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "batch commit failed") return nil, fmt.Errorf("batch commit failed: %w", err)
} }
return batchStatus, nil return batchStatus, nil
} }
@@ -147,8 +147,14 @@ func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *f
} }
var batchStatus *files.UploadSessionFinishBatchJobStatus var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond sleepTime := 100 * time.Millisecond
const maxTries = 120 const maxSleepTime = 1 * time.Second
for try := 1; try <= maxTries; try++ { startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) { err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{ batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId, AsyncJobId: launchBatchStatus.AsyncJobId,
@@ -156,23 +162,25 @@ func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *f
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d/%d", sleepTime, err, try, maxTries) fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else { } else {
if batchStatus.Tag == "complete" { if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil return batchStatus.Complete, nil
} }
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d/%d", sleepTime, batchStatus.Tag, try, maxTries) fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
} }
time.Sleep(sleepTime) time.Sleep(sleepTime)
sleepTime *= 2 sleepTime *= 2
if sleepTime > time.Second { if sleepTime > maxSleepTime {
sleepTime = time.Second sleepTime = maxSleepTime
} }
try++
} }
if err == nil { if err == nil {
err = errors.New("batch didn't complete") err = errors.New("batch didn't complete")
} }
return nil, errors.Wrapf(err, "wait for batch failed after %d tries", maxTries) return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
} }
// commit a batch // commit a batch
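
The rewritten loop swaps the fixed 120-try limit for a wall-clock budget (the new batch_commit_timeout option) with exponential backoff capped at one second. Reduced to its essentials, the shape is (a sketch under those assumptions, not the rclone code; imports of errors and time assumed):

    // pollUntil retries check with doubling sleeps capped at maxSleep
    // until it reports done or the time budget is spent.
    func pollUntil(budget, maxSleep time.Duration, check func() bool) error {
        sleep := 100 * time.Millisecond
        start := time.Now()
        for time.Since(start) < budget {
            if check() {
                return nil
            }
            time.Sleep(sleep)
            sleep *= 2
            if sleep > maxSleep {
                sleep = maxSleep
            }
        }
        return errors.New("batch didn't complete")
    }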
@@ -208,13 +216,13 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
case "complete": case "complete":
complete = batchStatus.Complete complete = batchStatus.Complete
default: default:
return errors.Errorf("batch returned unknown status %q", batchStatus.Tag) return fmt.Errorf("batch returned unknown status %q", batchStatus.Tag)
} }
// Check we got the right number of entries // Check we got the right number of entries
entries := complete.Entries entries := complete.Entries
if len(entries) != len(results) { if len(entries) != len(results) {
return errors.Errorf("expecting %d items in batch but got %d", len(results), len(entries)) return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
} }
// Report results to clients // Report results to clients
@@ -242,7 +250,7 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
errorTag += "/" + item.Failure.PropertiesError.Tag errorTag += "/" + item.Failure.PropertiesError.Tag
} }
} }
resp.err = errors.Errorf("batch upload failed: %s", errorTag) resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
} }
if !b.async { if !b.async {
results[i] <- resp results[i] <- resp
@@ -253,7 +261,7 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
// Report an error if any failed in the batch // Report an error if any failed in the batch
if errorTag != "" { if errorTag != "" {
return errors.Errorf("batch had %d errors: last error: %s", errorCount, errorTag) return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
} }
fs.Debugf(b.f, "Committed %s", desc) fs.Debugf(b.f, "Committed %s", desc)

backend/dropbox/dropbox.go (mode changed: Executable file → Normal file)

@@ -23,6 +23,7 @@ of path_display and all will be well.
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"io" "io"
"path" "path"
@@ -31,14 +32,13 @@ import (
"time" "time"
"unicode/utf8" "unicode/utf8"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/dropbox/dbhash" "github.com/rclone/rclone/backend/dropbox/dbhash"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
@@ -66,7 +66,7 @@ const (
// //
// Speed vs chunk size uploading a 1 GiB file on 2017-11-22 // Speed vs chunk size uploading a 1 GiB file on 2017-11-22
// //
// Chunk Size MiB, Speed MiByte/s, % of max // Chunk Size MiB, Speed MiB/s, % of max
// 1 1.364 11% // 1 1.364 11%
// 2 2.443 19% // 2 2.443 19%
// 4 4.288 33% // 4 4.288 33%
@@ -154,7 +154,7 @@ func init() {
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size", Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size. (< %v). Help: fmt.Sprintf(`Upload chunk size (< %v).
Any files larger than this will be uploaded in chunks of this size. Any files larger than this will be uploaded in chunks of this size.
@@ -252,7 +252,7 @@ maximise throughput.
Advanced: true, Advanced: true,
}, { }, {
Name: "batch_timeout", Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading Help: `Max time to allow an idle upload batch before uploading.
If an upload batch is idle for more than this long then it will be If an upload batch is idle for more than this long then it will be
uploaded. uploaded.
@@ -266,6 +266,11 @@ default based on the batch_mode in use.
`, `,
Default: fs.Duration(0), Default: fs.Duration(0),
Advanced: true, Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish comitting`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -285,15 +290,16 @@ default based on the batch_mode in use.
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"` Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"` SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"` SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"` BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"` BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"` BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"` BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
Enc encoder.MultiEncoder `config:"encoding"` AsyncBatch bool `config:"async_batch"`
Enc encoder.MultiEncoder `config:"encoding"`
} }
// Fs represents a remote dropbox server // Fs represents a remote dropbox server
@@ -357,24 +363,24 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
if err == nil { if err == nil {
return false, err return false, err
} }
baseErrString := errors.Cause(err).Error() errString := err.Error()
// First check for specific errors // First check for specific errors
if strings.Contains(baseErrString, "insufficient_space") { if strings.Contains(errString, "insufficient_space") {
return false, fserrors.FatalError(err) return false, fserrors.FatalError(err)
} else if strings.Contains(baseErrString, "malformed_path") { } else if strings.Contains(errString, "malformed_path") {
return false, fserrors.NoRetryError(err) return false, fserrors.NoRetryError(err)
} }
// Then handle any official Retry-After header from Dropbox's SDK // Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) { switch e := err.(type) {
case auth.RateLimitAPIError: case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 { if e.RateLimitError.RetryAfter > 0 {
fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter) fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second) err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
} }
return true, err return true, err
} }
// Keep old behavior for backward compatibility // Keep old behavior for backward compatibility
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" { if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
return true, err return true, err
} }
return fserrors.ShouldRetry(err), err return fserrors.ShouldRetry(err), err
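
Dropping errors.Cause here is safe because fmt.Errorf("...: %w", err) concatenates messages down the chain, so the full err.Error() still contains the SDK substring being matched. For example:

    err := fmt.Errorf("upload failed: %w", errors.New("too_many_requests"))
    fmt.Println(strings.Contains(err.Error(), "too_many_requests")) // true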
@@ -383,10 +389,10 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
func checkUploadChunkSize(cs fs.SizeSuffix) error { func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase const minChunkSize = fs.SizeSuffixBase
if cs < minChunkSize { if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize) return fmt.Errorf("%s is less than %s", cs, minChunkSize)
} }
if cs > maxChunkSize { if cs > maxChunkSize {
return errors.Errorf("%s is greater than %s", cs, maxChunkSize) return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
} }
return nil return nil
} }
@@ -409,7 +415,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} }
err = checkUploadChunkSize(opt.ChunkSize) err = checkUploadChunkSize(opt.ChunkSize)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "dropbox: chunk size") return nil, fmt.Errorf("dropbox: chunk size: %w", err)
} }
// Convert the old token if it exists. The old token was just // Convert the old token if it exists. The old token was just
@@ -421,13 +427,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken) newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
err := config.SetValueAndSave(name, config.ConfigToken, newToken) err := config.SetValueAndSave(name, config.ConfigToken, newToken)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "NewFS convert token") return nil, fmt.Errorf("NewFS convert token: %w", err)
} }
} }
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m)) oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m))
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to configure dropbox") return nil, fmt.Errorf("failed to configure dropbox: %w", err)
} }
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
@@ -468,7 +474,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
memberIds, err := f.team.MembersGetInfo(args) memberIds, err := f.team.MembersGetInfo(args)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate) return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
} }
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
@@ -545,7 +551,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "get current account failed") return nil, fmt.Errorf("get current account failed: %w", err)
} }
switch x := acc.RootInfo.(type) { switch x := acc.RootInfo.(type) {
case *common.TeamRootInfo: case *common.TeamRootInfo:
@@ -553,28 +559,30 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case *common.UserRootInfo: case *common.UserRootInfo:
f.ns = x.RootNamespaceId f.ns = x.RootNamespaceId
default: default:
return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo) return nil, fmt.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
} }
fs.Debugf(f, "Using root namespace %q", f.ns) fs.Debugf(f, "Using root namespace %q", f.ns)
} }
f.setRoot(root) f.setRoot(root)
// See if the root is actually an object // See if the root is actually an object
_, err = f.getFileMetadata(ctx, f.slashRoot) if f.root != "" {
if err == nil { _, err = f.getFileMetadata(ctx, f.slashRoot)
newRoot := path.Dir(f.root) if err == nil {
if newRoot == "." { newRoot := path.Dir(f.root)
newRoot = "" if newRoot == "." {
newRoot = ""
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
} }
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
} }
return f, nil return f, nil
} }
// headerGenerator for dropbox sdk // headerGenerator for dropbox sdk
func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string { func (f *Fs) headerGenerator(hostType string, namespace string, route string) map[string]string {
if f.ns == "" { if f.ns == "" {
return map[string]string{} return map[string]string{}
} }
@@ -624,6 +632,9 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *fi
} }
fileInfo, ok := entry.(*files.FileMetadata) fileInfo, ok := entry.(*files.FileMetadata)
if !ok { if !ok {
if _, ok = entry.(*files.FolderMetadata); ok {
return nil, fs.ErrorIsDir
}
return nil, fs.ErrorNotAFile return nil, fs.ErrorNotAFile
} }
return fileInfo, nil return fileInfo, nil
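
getFileMetadata now reports a folder at the requested path as fs.ErrorIsDir instead of the catch-all fs.ErrorNotAFile, so callers such as NewFs can tell the two apart. A caller-side sketch of the intended distinction (statFile is a hypothetical helper, not part of this diff; errors, fmt and fs imports assumed):

    func statFile(ctx context.Context, f *Fs, path string) error {
        _, err := f.getFileMetadata(ctx, path)
        switch {
        case err == nil:
            return nil // a regular file
        case errors.Is(err, fs.ErrorIsDir):
            return fmt.Errorf("%q is a directory, not a file", path)
        default:
            return err // fs.ErrorNotAFile, or a transport error
        }
    }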
@@ -701,7 +712,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "list continue") return nil, fmt.Errorf("list continue: %w", err)
} }
} }
for _, entry := range res.Entries { for _, entry := range res.Entries {
@@ -775,7 +786,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "list continue") return nil, fmt.Errorf("list continue: %w", err)
} }
} }
for _, entry := range res.Entries { for _, entry := range res.Entries {
@@ -785,7 +796,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
fs: f, fs: f,
url: entry.PreviewUrl, url: entry.PreviewUrl,
remote: entryPath, remote: entryPath,
modTime: entry.TimeInvited, modTime: *entry.TimeInvited,
} }
if err != nil { if err != nil {
return nil, err return nil, err
@@ -841,6 +852,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
arg := files.ListFolderArg{ arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(root), Path: f.opt.Enc.FromStandardPath(root),
Recursive: false, Recursive: false,
Limit: 1000,
} }
if root == "/" { if root == "/" {
arg.Path = "" // Specify root folder as empty string arg.Path = "" // Specify root folder as empty string
@@ -868,7 +880,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "list continue") return nil, fmt.Errorf("list continue: %w", err)
} }
} }
for _, entry := range res.Entries { for _, entry := range res.Entries {
@@ -980,7 +992,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
// check directory exists // check directory exists
_, err = f.getDirMetadata(ctx, root) _, err = f.getDirMetadata(ctx, root)
if err != nil { if err != nil {
return errors.Wrap(err, "Rmdir") return fmt.Errorf("Rmdir: %w", err)
} }
root = f.opt.Enc.FromStandardPath(root) root = f.opt.Enc.FromStandardPath(root)
@@ -998,7 +1010,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "Rmdir") return fmt.Errorf("Rmdir: %w", err)
} }
if len(res.Entries) != 0 { if len(res.Entries) != 0 {
return errors.New("directory not empty") return errors.New("directory not empty")
@@ -1064,7 +1076,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "copy failed") return nil, fmt.Errorf("copy failed: %w", err)
} }
// Set the metadata // Set the metadata
@@ -1074,7 +1086,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
err = dstObj.setMetadataFromEntry(fileInfo) err = dstObj.setMetadataFromEntry(fileInfo)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "copy failed") return nil, fmt.Errorf("copy failed: %w", err)
} }
return dstObj, nil return dstObj, nil
@@ -1125,7 +1137,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "move failed") return nil, fmt.Errorf("move failed: %w", err)
} }
// Set the metadata // Set the metadata
@@ -1135,7 +1147,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
err = dstObj.setMetadataFromEntry(fileInfo) err = dstObj.setMetadataFromEntry(fileInfo)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "move failed") return nil, fmt.Errorf("move failed: %w", err)
} }
return dstObj, nil return dstObj, nil
} }
@@ -1160,14 +1172,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
} }
if expire < fs.DurationOff { if expire < fs.DurationOff {
expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second) expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
createArg.Settings.Expires = expiryTime createArg.Settings.Expires = &expiryTime
}
// FIXME note we can't set Settings for non enterprise dropbox
// because of https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75
// however this only goes wrong when we set Expires, so as a
// work-around remove Settings unless expire is set.
if expire == fs.DurationOff {
createArg.Settings = nil
} }
var linkRes sharing.IsSharedLinkMetadata var linkRes sharing.IsSharedLinkMetadata
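
The v6 SDK models optional protocol fields as pointers, which is why Expires here (and ClientModified further down) now take the address of a local variable. A stand-in illustration of the pattern (Settings below is a fabricated type, not the real dropbox one):

    // With a *time.Time field, nil means "unset"; a zero time no
    // longer has to double as the absent value.
    type Settings struct {
        Expires *time.Time
    }

    func example() {
        var s Settings // s.Expires == nil: the field is simply omitted
        t := time.Now().UTC().Round(time.Second)
        s.Expires = &t // set by taking the address of a local copy
    }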
@@ -1250,7 +1255,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "MoveDir failed") return fmt.Errorf("MoveDir failed: %w", err)
} }
return nil return nil
@@ -1264,7 +1269,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "about failed") return nil, fmt.Errorf("about failed: %w", err)
} }
var total uint64 var total uint64
if q.Allocation != nil { if q.Allocation != nil {
@@ -1404,7 +1409,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return "", errors.Wrap(err, "list continue") return "", fmt.Errorf("list continue: %w", err)
} }
cursor = changeList.Cursor cursor = changeList.Cursor
var entryType fs.EntryType var entryType fs.EntryType
@@ -1483,7 +1488,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
} }
err := o.readMetaData(ctx) err := o.readMetaData(ctx)
if err != nil { if err != nil {
return "", errors.Wrap(err, "failed to read hash from metadata") return "", fmt.Errorf("failed to read hash from metadata: %w", err)
} }
return o.hash, nil return o.hash, nil
} }
@@ -1736,12 +1741,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
remote := o.remotePath() remote := o.remotePath()
if ignoredFiles.MatchString(remote) { if ignoredFiles.MatchString(remote) {
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote))) return fserrors.NoRetryError(fmt.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
} }
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath())) commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
commitInfo.Mode.Tag = "overwrite" commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision. // The Dropbox API only accepts timestamps in UTC with second precision.
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second) clientModified := src.ModTime(ctx).UTC().Round(time.Second)
commitInfo.ClientModified = &clientModified
// Don't attempt to create filenames that are too long // Don't attempt to create filenames that are too long
if cErr := checkPathLength(commitInfo.Path); cErr != nil { if cErr := checkPathLength(commitInfo.Path); cErr != nil {
return cErr return cErr
@@ -1759,14 +1765,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}) })
} }
if err != nil { if err != nil {
return errors.Wrap(err, "upload failed") return fmt.Errorf("upload failed: %w", err)
} }
// If we haven't received data back from batch upload then fake it // If we haven't received data back from batch upload then fake it
// //
// This will only happen if we are uploading async batches // This will only happen if we are uploading async batches
if entry == nil { if entry == nil {
o.bytes = size o.bytes = size
o.modTime = commitInfo.ClientModified o.modTime = *commitInfo.ClientModified
o.hash = "" // we don't have this o.hash = "" // we don't have this
return nil return nil
} }


@@ -2,6 +2,8 @@ package fichier
import ( import (
"context" "context"
"errors"
"fmt"
"io" "io"
"net/http" "net/http"
"net/url" "net/url"
@@ -10,7 +12,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
@@ -81,12 +82,17 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't read file info") return nil, fmt.Errorf("couldn't read file info: %w", err)
} }
return &file, err return &file, err
} }
// maybe do some actual validation later if necessary
func validToken(token *GetTokenResponse) bool {
return token.Status == "OK"
}
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) { func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{ request := DownloadRequest{
URL: url, URL: url,
@@ -101,10 +107,11 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) { err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token) resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
return shouldRetry(ctx, resp, err) doretry, err := shouldRetry(ctx, resp, err)
return doretry || !validToken(&token), err
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't list files") return nil, fmt.Errorf("couldn't list files: %w", err)
} }
return &token, nil return &token, nil
@@ -138,7 +145,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't list files") return nil, fmt.Errorf("couldn't list files: %w", err)
} }
entries = make([]fs.DirEntry, len(sharedFiles)) entries = make([]fs.DirEntry, len(sharedFiles))
@@ -167,7 +174,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't list files") return nil, fmt.Errorf("couldn't list files: %w", err)
} }
for i := range filesList.Items { for i := range filesList.Items {
item := &filesList.Items[i] item := &filesList.Items[i]
@@ -195,7 +202,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't list folders") return nil, fmt.Errorf("couldn't list folders: %w", err)
} }
foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name) foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
for i := range foldersList.SubFolders { for i := range foldersList.SubFolders {
@@ -289,7 +296,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't create folder") return nil, fmt.Errorf("couldn't create folder: %w", err)
} }
// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID) // fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
@@ -316,10 +323,10 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder") return nil, fmt.Errorf("couldn't remove folder: %w", err)
} }
if response.Status != "OK" { if response.Status != "OK" {
return nil, errors.Errorf("can't remove folder: %s", response.Message) return nil, fmt.Errorf("can't remove folder: %s", response.Message)
} }
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID) // fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -346,7 +353,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't remove file") return nil, fmt.Errorf("couldn't remove file: %w", err)
} }
// fs.Debugf(f, "Removed file with url `%s`", url) // fs.Debugf(f, "Removed file with url `%s`", url)
@@ -373,7 +380,7 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't copy file") return nil, fmt.Errorf("couldn't copy file: %w", err)
} }
return response, nil return response, nil
@@ -398,7 +405,7 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't copy file") return nil, fmt.Errorf("couldn't copy file: %w", err)
} }
return response, nil return response, nil
@@ -426,7 +433,7 @@ func (f *Fs) renameFile(ctx context.Context, url string, newName string) (respon
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't rename file") return nil, fmt.Errorf("couldn't rename file: %w", err)
} }
return response, nil return response, nil
@@ -447,7 +454,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node") return nil, fmt.Errorf("didnt got an upload node: %w", err)
} }
// fs.Debugf(f, "Got Upload node") // fs.Debugf(f, "Got Upload node")
@@ -491,7 +498,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't upload file") return nil, fmt.Errorf("couldn't upload file: %w", err)
} }
// fs.Debugf(f, "Uploaded File `%s`", fileName) // fs.Debugf(f, "Uploaded File `%s`", fileName)
@@ -525,7 +532,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't finish file upload") return nil, fmt.Errorf("couldn't finish file upload: %w", err)
} }
return response, err return response, err


@@ -2,6 +2,7 @@ package fichier
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
@@ -9,7 +10,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
@@ -37,21 +37,21 @@ func init() {
Description: "1Fichier", Description: "1Fichier",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl", Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key", Name: "api_key",
}, { }, {
Help: "If you want to download a shared folder, add this parameter", Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder", Name: "shared_folder",
Required: false, Required: false,
Advanced: true, Advanced: true,
}, { }, {
Help: "If you want to download a shared file that is password protected, add this parameter", Help: "If you want to download a shared file that is password protected, add this parameter.",
Name: "file_password", Name: "file_password",
Required: false, Required: false,
Advanced: true, Advanced: true,
IsPassword: true, IsPassword: true,
}, { }, {
Help: "If you want to list the files in a shared folder that is password protected, add this parameter", Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
Name: "folder_password", Name: "folder_password",
Required: false, Required: false,
Advanced: true, Advanced: true,
@@ -454,10 +454,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if currentDirectoryID == directoryID { if currentDirectoryID == directoryID {
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf) resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't rename file") return nil, fmt.Errorf("couldn't rename file: %w", err)
} }
if resp.Status != "OK" { if resp.Status != "OK" {
return nil, errors.Errorf("couldn't rename file: %s", resp.Message) return nil, fmt.Errorf("couldn't rename file: %s", resp.Message)
} }
url = resp.URLs[0].URL url = resp.URLs[0].URL
} else { } else {
@@ -467,10 +467,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf) resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't move file") return nil, fmt.Errorf("couldn't move file: %w", err)
} }
if resp.Status != "OK" { if resp.Status != "OK" {
return nil, errors.Errorf("couldn't move file: %s", resp.Message) return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
} }
url = resp.URLs[0] url = resp.URLs[0]
} }
@@ -503,10 +503,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf) resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't move file") return nil, fmt.Errorf("couldn't move file: %w", err)
} }
if resp.Status != "OK" { if resp.Status != "OK" {
return nil, errors.Errorf("couldn't move file: %s", resp.Message) return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
} }
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL) file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)

View File

@@ -2,11 +2,12 @@ package fichier
import ( import (
"context" "context"
"errors"
"fmt"
"io" "io"
"net/http" "net/http"
"time" "time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
@@ -122,7 +123,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Delete duplicate after successful upload // Delete duplicate after successful upload
err = o.Remove(ctx) err = o.Remove(ctx)
if err != nil { if err != nil {
return errors.Wrap(err, "failed to remove old version") return fmt.Errorf("failed to remove old version: %w", err)
} }
// Replace guts of old object with new one // Replace guts of old object with new one

View File

@@ -69,11 +69,29 @@ func (i *Int) UnmarshalJSON(data []byte) error {
return json.Unmarshal(data, (*int)(i)) return json.Unmarshal(data, (*int)(i))
} }
// String represents a string which can be represented in JSON as a
// quoted string or an integer.
type String string
// MarshalJSON turns a String into JSON
func (s *String) MarshalJSON() (out []byte, err error) {
return json.Marshal((*string)(s))
}
// UnmarshalJSON turns JSON into a String
func (s *String) UnmarshalJSON(data []byte) error {
err := json.Unmarshal(data, (*string)(s))
if err != nil {
*s = String(data)
}
return nil
}
// Status is returned in all status responses // Status is returned in all status responses
type Status struct { type Status struct {
Code string `json:"status"` Code string `json:"status"`
Message string `json:"statusmessage"` Message string `json:"statusmessage"`
TaskID string `json:"taskid"` TaskID String `json:"taskid"`
// Warning string `json:"warning"` // obsolete // Warning string `json:"warning"` // obsolete
} }
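
The new api.String type above tolerates servers that return taskid either as a quoted JSON string or as a bare integer: when the string unmarshal fails, it keeps the raw token instead. A minimal sketch of that behaviour (the Status shape mirrors the struct above):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// String mirrors api.String above: a JSON value that may arrive either
// as a quoted string or as a bare integer.
type String string

// UnmarshalJSON falls back to the raw token when the value is not a
// quoted string, so 123 decodes to "123".
func (s *String) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, (*string)(s)); err != nil {
		*s = String(data)
	}
	return nil
}

func main() {
	var a, b struct {
		TaskID String `json:"taskid"`
	}
	_ = json.Unmarshal([]byte(`{"taskid":"42"}`), &a)
	_ = json.Unmarshal([]byte(`{"taskid":42}`), &b)
	fmt.Println(a.TaskID, b.TaskID) // 42 42
}
```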

View File

@@ -17,6 +17,7 @@ import (
"bytes" "bytes"
"context" "context"
"encoding/base64" "encoding/base64"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@@ -32,7 +33,6 @@ import (
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/random"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/filefabric/api" "github.com/rclone/rclone/backend/filefabric/api"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
@@ -65,7 +65,7 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "url", Name: "url",
Help: "URL of the Enterprise File Fabric to connect to", Help: "URL of the Enterprise File Fabric to connect to.",
Required: true, Required: true,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "https://storagemadeeasy.com", Value: "https://storagemadeeasy.com",
@@ -79,14 +79,15 @@ func init() {
}}, }},
}, { }, {
Name: "root_folder_id", Name: "root_folder_id",
Help: `ID of the root folder Help: `ID of the root folder.
Leave blank normally. Leave blank normally.
Fill in to make rclone start with directory of a given ID. Fill in to make rclone start with directory of a given ID.
`, `,
}, { }, {
Name: "permanent_token", Name: "permanent_token",
Help: `Permanent Authentication Token Help: `Permanent Authentication Token.
A Permanent Authentication Token can be created in the Enterprise File A Permanent Authentication Token can be created in the Enterprise File
Fabric, on the user's Dashboard under Security, there is an entry Fabric, on the user's Dashboard under Security, there is an entry
@@ -99,7 +100,7 @@ For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`, `,
}, { }, {
Name: "token", Name: "token",
Help: `Session Token Help: `Session Token.
This is a session token which rclone caches in the config file. It is This is a session token which rclone caches in the config file. It is
usually valid for 1 hour. usually valid for 1 hour.
@@ -109,14 +110,14 @@ Don't set this value - rclone will set it automatically.
Advanced: true, Advanced: true,
}, { }, {
Name: "token_expiry", Name: "token_expiry",
Help: `Token expiry time Help: `Token expiry time.
Don't set this value - rclone will set it automatically. Don't set this value - rclone will set it automatically.
`, `,
Advanced: true, Advanced: true,
}, { }, {
Name: "version", Name: "version",
Help: `Version read from the file fabric Help: `Version read from the file fabric.
Don't set this value - rclone will set it automatically. Don't set this value - rclone will set it automatically.
`, `,
@@ -222,13 +223,14 @@ var retryStatusCodes = []struct {
// delete in that folder. Please try again later or use // delete in that folder. Please try again later or use
// another name. (error_background) // another name. (error_background)
code: "error_background", code: "error_background",
sleep: 6 * time.Second, sleep: 1 * time.Second,
}, },
} }
// shouldRetry returns a boolean as to whether this resp and err // shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience // deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError) (bool, error) { // try should be the number of tries so far, counting up from 1
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError, try int) (bool, error) {
if fserrors.ContextError(ctx, &err) { if fserrors.ContextError(ctx, &err) {
return false, err return false, err
} }
@@ -244,9 +246,10 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
for _, retryCode := range retryStatusCodes { for _, retryCode := range retryStatusCodes {
if code == retryCode.code { if code == retryCode.code {
if retryCode.sleep > 0 { if retryCode.sleep > 0 {
// make this thread only sleep extra time // make this thread only sleep exponentially increasing extra time
fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", retryCode.sleep, retryCode.code) sleepTime := retryCode.sleep << (try - 1)
time.Sleep(retryCode.sleep) fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", sleepTime, retryCode.code)
time.Sleep(sleepTime)
} }
return true, err return true, err
} }
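
The error_background retry above now backs off exponentially instead of sleeping a flat six seconds: the try counter is threaded through from the pacer loop (see the rpc hunk below), so with the new one-second base, try 1 waits 1s, try 2 waits 2s, try 3 waits 4s, and so on. A sketch of the shift arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	base := 1 * time.Second
	for try := 1; try <= 5; try++ {
		// left-shifting a Duration doubles the wait on each retry
		fmt.Printf("try %d: sleep %v\n", try, base<<(try-1))
	}
}
```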
@@ -264,7 +267,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, rootID string, path string
"pid": rootID, "pid": rootID,
}, &resp, nil) }, &resp, nil)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to check path exists") return nil, fmt.Errorf("failed to check path exists: %w", err)
} }
if resp.Exists != "y" { if resp.Exists != "y" {
return nil, fs.ErrorObjectNotFound return nil, fs.ErrorObjectNotFound
@@ -305,7 +308,7 @@ func (f *Fs) getApplianceInfo(ctx context.Context) error {
"token": "*", "token": "*",
}, &applianceInfo, nil) }, &applianceInfo, nil)
if err != nil { if err != nil {
return errors.Wrap(err, "failed to read appliance version") return fmt.Errorf("failed to read appliance version: %w", err)
} }
f.opt.Version = applianceInfo.SoftwareVersionLabel f.opt.Version = applianceInfo.SoftwareVersionLabel
f.m.Set("version", f.opt.Version) f.m.Set("version", f.opt.Version)
@@ -346,7 +349,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
"authtoken": f.opt.PermanentToken, "authtoken": f.opt.PermanentToken,
}, &info, nil) }, &info, nil)
if err != nil { if err != nil {
return "", errors.Wrap(err, "failed to get session token") return "", fmt.Errorf("failed to get session token: %w", err)
} }
refreshed = true refreshed = true
now = now.Add(tokenLifeTime) now = now.Add(tokenLifeTime)
@@ -400,11 +403,13 @@ func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKEr
ContentType: "application/x-www-form-urlencoded", ContentType: "application/x-www-form-urlencoded",
Options: options, Options: options,
} }
try := 0
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
try++
// Refresh the body each retry // Refresh the body each retry
opts.Body = strings.NewReader(data.Encode()) opts.Body = strings.NewReader(data.Encode())
resp, err = f.srv.CallJSON(ctx, &opts, nil, result) resp, err = f.srv.CallJSON(ctx, &opts, nil, result)
return f.shouldRetry(ctx, resp, err, result) return f.shouldRetry(ctx, resp, err, result, try)
}) })
if err != nil { if err != nil {
return resp, err return resp, err
@@ -557,7 +562,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
"fi_name": f.opt.Enc.FromStandardName(leaf), "fi_name": f.opt.Enc.FromStandardName(leaf),
}, &info, nil) }, &info, nil)
if err != nil { if err != nil {
return "", errors.Wrap(err, "failed to create directory") return "", fmt.Errorf("failed to create directory: %w", err)
} }
// fmt.Printf("...Id %q\n", *info.Id) // fmt.Printf("...Id %q\n", *info.Id)
return info.Item.ID, nil return info.Item.ID, nil
@@ -590,7 +595,7 @@ OUTER:
var info api.GetFolderContentsResponse var info api.GetFolderContentsResponse
_, err = f.rpc(ctx, "getFolderContents", p, &info, nil) _, err = f.rpc(ctx, "getFolderContents", p, &info, nil)
if err != nil { if err != nil {
return false, errors.Wrap(err, "failed to list directory") return false, fmt.Errorf("failed to list directory: %w", err)
} }
for i := range info.Items { for i := range info.Items {
item := &info.Items[i] item := &info.Items[i]
@@ -721,7 +726,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) (err error) {
"completedeletion": "n", "completedeletion": "n",
}, &info, nil) }, &info, nil)
if err != nil { if err != nil {
return errors.Wrap(err, "failed to delete file") return fmt.Errorf("failed to delete file: %w", err)
} }
return nil return nil
} }
@@ -758,7 +763,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}, &info, nil) }, &info, nil)
f.dirCache.FlushDir(dir) f.dirCache.FlushDir(dir)
if err != nil { if err != nil {
return errors.Wrap(err, "failed to remove directory") return fmt.Errorf("failed to remove directory: %w", err)
} }
return nil return nil
} }
@@ -820,7 +825,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
_, err = f.rpc(ctx, "doCopyFile", p, &info, nil) _, err = f.rpc(ctx, "doCopyFile", p, &info, nil)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to copy file") return nil, fmt.Errorf("failed to copy file: %w", err)
} }
err = dstObj.setMetaData(&info.Item) err = dstObj.setMetaData(&info.Item)
if err != nil { if err != nil {
@@ -839,7 +844,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
} }
// Wait for the background task to complete if necessary // Wait for the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID string) (err error) { func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
if taskID == "" || taskID == "0" { if taskID == "" || taskID == "0" {
// No task to wait for // No task to wait for
return nil return nil
@@ -852,7 +857,7 @@ func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID string) (err erro
"taskid": taskID, "taskid": taskID,
}, &info, nil) }, &info, nil)
if err != nil { if err != nil {
return errors.Wrapf(err, "failed to wait for task %s to complete", taskID) return fmt.Errorf("failed to wait for task %s to complete: %w", taskID, err)
} }
if len(info.Tasks) == 0 { if len(info.Tasks) == 0 {
// task has finished // task has finished
@@ -885,7 +890,7 @@ func (f *Fs) renameLeaf(ctx context.Context, isDir bool, id string, newLeaf stri
"fi_name": newLeaf, "fi_name": newLeaf,
}, &info, nil) }, &info, nil)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to rename leaf") return nil, fmt.Errorf("failed to rename leaf: %w", err)
} }
err = f.waitForBackgroundTask(ctx, info.Status.TaskID) err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
if err != nil { if err != nil {
@@ -929,7 +934,7 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
"dir_id": newDirectoryID, "dir_id": newDirectoryID,
}, &info, nil) }, &info, nil)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to move file to new directory") return nil, fmt.Errorf("failed to move file to new directory: %w", err)
} }
item = &info.Item item = &info.Item
err = f.waitForBackgroundTask(ctx, info.Status.TaskID) err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
@@ -1032,7 +1037,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
var info api.EmptyResponse var info api.EmptyResponse
_, err = f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil) _, err = f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil)
if err != nil { if err != nil {
return errors.Wrap(err, "failed to empty trash") return fmt.Errorf("failed to empty trash: %w", err)
} }
return nil return nil
} }
@@ -1089,7 +1094,7 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info // setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) { func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type != api.ItemTypeFile { if info.Type != api.ItemTypeFile {
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type) return fs.ErrorIsDir
} }
o.hasMetaData = true o.hasMetaData = true
o.size = info.Size o.size = info.Size
@@ -1159,7 +1164,7 @@ func (o *Object) modifyFile(ctx context.Context, keyValues [][2]string) error {
"data": data.String(), "data": data.String(),
}, &info, nil) }, &info, nil)
if err != nil { if err != nil {
return errors.Wrap(err, "failed to update metadata") return fmt.Errorf("failed to update metadata: %w", err)
} }
return o.setMetaData(&info.Item) return o.setMetaData(&info.Item)
} }
@@ -1242,7 +1247,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
_, err = o.fs.rpc(ctx, "doInitUpload", p, &upload, nil) _, err = o.fs.rpc(ctx, "doInitUpload", p, &upload, nil)
if err != nil { if err != nil {
return errors.Wrap(err, "failed to initialize upload") return fmt.Errorf("failed to initialize upload: %w", err)
} }
// Cancel the upload if aborted or it fails // Cancel the upload if aborted or it fails
@@ -1278,18 +1283,20 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var contentLength = size var contentLength = size
opts.ContentLength = &contentLength // NB CallJSON scribbles on this which is naughty opts.ContentLength = &contentLength // NB CallJSON scribbles on this which is naughty
} }
try := 0
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.CallNoRetry(func() (bool, error) {
try++
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader) resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader)
return o.fs.shouldRetry(ctx, resp, err, nil) return o.fs.shouldRetry(ctx, resp, err, nil, try)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "failed to upload") return fmt.Errorf("failed to upload: %w", err)
} }
if uploader.Success != "y" { if uploader.Success != "y" {
return errors.Errorf("upload failed") return fmt.Errorf("upload failed")
} }
if size > 0 && uploader.FileSize != size { if size > 0 && uploader.FileSize != size {
return errors.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize) return fmt.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
} }
// Now finalize the file // Now finalize the file
@@ -1301,7 +1308,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
_, err = o.fs.rpc(ctx, "doCompleteUpload", p, &finalize, nil) _, err = o.fs.rpc(ctx, "doCompleteUpload", p, &finalize, nil)
if err != nil { if err != nil {
return errors.Wrap(err, "failed to finalize upload") return fmt.Errorf("failed to finalize upload: %w", err)
} }
finalized = true finalized = true

View File

@@ -4,6 +4,8 @@ package ftp
import ( import (
"context" "context"
"crypto/tls" "crypto/tls"
"errors"
"fmt"
"io" "io"
"net" "net"
"net/textproto" "net/textproto"
@@ -14,7 +16,6 @@ import (
"time" "time"
"github.com/jlaffaye/ftp" "github.com/jlaffaye/ftp"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
@@ -48,26 +49,22 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "host", Name: "host",
Help: "FTP host to connect to", Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true, Required: true,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, { }, {
Name: "user", Name: "user",
Help: "FTP username, leave blank for current username, " + currentUser, Help: "FTP username, leave blank for current username, " + currentUser + ".",
}, { }, {
Name: "port", Name: "port",
Help: "FTP port, leave blank to use default (21)", Help: "FTP port, leave blank to use default (21).",
}, { }, {
Name: "pass", Name: "pass",
Help: "FTP password", Help: "FTP password.",
IsPassword: true, IsPassword: true,
Required: true,
}, { }, {
Name: "tls", Name: "tls",
Help: `Use Implicit FTPS (FTP over TLS) Help: `Use Implicit FTPS (FTP over TLS).
When using implicit FTP over TLS the client connects using TLS When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather non-TLS-aware servers. This is usually served over port 990 rather
@@ -75,35 +72,41 @@ than port 21. Cannot be used in combination with explicit FTP.`,
Default: false, Default: false,
}, { }, {
Name: "explicit_tls", Name: "explicit_tls",
Help: `Use Explicit FTPS (FTP over TLS) Help: `Use Explicit FTPS (FTP over TLS).
When using explicit FTP over TLS the client explicitly requests When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`, to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false, Default: false,
}, { }, {
Name: "concurrency", Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited", Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
Default: 0, Default: 0,
Advanced: true, Advanced: true,
}, { }, {
Name: "no_check_certificate", Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server", Help: "Do not verify the TLS certificate of the server.",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, { }, {
Name: "disable_epsv", Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support", Help: "Disable using EPSV even if server advertises support.",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, { }, {
Name: "disable_mlsd", Name: "disable_mlsd",
Help: "Disable using MLSD even if server advertises support", Help: "Disable using MLSD even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "writing_mdtm",
Help: "Use MDTM to set modification time (VsFtpd quirk)",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, { }, {
Name: "idle_timeout", Name: "idle_timeout",
Default: fs.Duration(60 * time.Second), Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections Help: `Max time before closing idle connections.
If no connections have been returned to the connection pool in the time If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool. given, rclone will empty the connection pool.
@@ -116,17 +119,51 @@ Set to 0 to keep connections indefinitely.
Help: "Maximum time to wait for a response to close.", Help: "Maximum time to wait for a response to close.",
Default: fs.Duration(60 * time.Second), Default: fs.Duration(60 * time.Second),
Advanced: true, Advanced: true,
}, {
Name: "tls_cache_size",
Help: `Size of TLS session cache for all control and data connections.
The TLS cache allows TLS sessions to be resumed and PSKs to be reused between connections.
Increase it if the default size is not enough, resulting in TLS resumption errors.
Enabled by default. Use 0 to disable.`,
Default: 32,
Advanced: true,
}, {
Name: "disable_tls13",
Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
Default: false,
Advanced: true,
}, {
Name: "shut_timeout",
Help: "Maximum time to wait for data connection closing status.",
Default: fs.Duration(60 * time.Second),
Advanced: true,
}, {
Name: "ask_password",
Default: false,
Help: `Allow asking for FTP password when needed.
If this is set and no password is supplied then rclone will ask for a password.
`,
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
Advanced: true, Advanced: true,
// The FTP protocol can't handle trailing spaces (for instance // The FTP protocol can't handle trailing spaces
// pureftpd turns them into _) // (for instance, pureftpd turns them into '_')
//
// proftpd can't handle '*' in file names
// pureftpd can't handle '[', ']' or '*'
Default: (encoder.Display | Default: (encoder.Display |
encoder.EncodeRightSpace), encoder.EncodeRightSpace),
Examples: []fs.OptionExample{{
Value: "Asterisk,Ctl,Dot,Slash",
Help: "ProFTPd can't handle '*' in file names",
}, {
Value: "BackSlash,Ctl,Del,Dot,RightSpace,Slash,SquareBracket",
Help: "PureFTPd can't handle '[]' or '*' in file names",
}, {
Value: "Ctl,LeftPeriod,Slash",
Help: "VsFTPd can't handle file names starting with dot",
}},
}}, }},
}) })
} }
@@ -139,12 +176,17 @@ type Options struct {
Port string `config:"port"` Port string `config:"port"`
TLS bool `config:"tls"` TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"` ExplicitTLS bool `config:"explicit_tls"`
TLSCacheSize int `config:"tls_cache_size"`
DisableTLS13 bool `config:"disable_tls13"`
Concurrency int `config:"concurrency"` Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"` SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"` DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"` DisableMLSD bool `config:"disable_mlsd"`
WritingMDTM bool `config:"writing_mdtm"`
IdleTimeout fs.Duration `config:"idle_timeout"` IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"` CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -165,6 +207,9 @@ type Fs struct {
tokens *pacer.TokenDispenser tokens *pacer.TokenDispenser
tlsConf *tls.Config tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime
fLstTime bool // true if the List call returns precise time
} }
// Object describes an FTP file // Object describes an FTP file
@@ -179,6 +224,7 @@ type FileInfo struct {
Name string Name string
Size uint64 Size uint64
ModTime time.Time ModTime time.Time
precise bool // true if the time is precise
IsDir bool IsDir bool
} }
@@ -290,6 +336,12 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.DisableMLSD { if f.opt.DisableMLSD {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true)) ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
} }
if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
}
if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 { if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0})) ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
} }
@@ -306,7 +358,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return false, nil return false, nil
}) })
if err != nil { if err != nil {
err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr) err = fmt.Errorf("failed to make FTP connection to %q: %w", f.dialAddr, err)
} }
return c, err return c, err
} }
@@ -353,8 +405,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil *pc = nil
if err != nil { if err != nil {
// If not a regular FTP error code then check the connection // If not a regular FTP error code then check the connection
_, isRegularError := errors.Cause(err).(*textproto.Error) var tpErr *textproto.Error
if !isRegularError { if !errors.As(err, &tpErr) {
nopErr := c.NoOp() nopErr := c.NoOp()
if nopErr != nil { if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr) fs.Debugf(f, "Connection failed, closing: %v", nopErr)
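
The connection health check above swaps errors.Cause for errors.As, which walks a %w wrap chain looking for a *textproto.Error (the type jlaffaye/ftp uses for protocol status replies). A minimal sketch:

```go
package main

import (
	"errors"
	"fmt"
	"net/textproto"
)

func main() {
	// a protocol error wrapped with %w, as the backend now produces
	base := &textproto.Error{Code: 550, Msg: "file unavailable"}
	err := fmt.Errorf("update stor: %w", base)

	// errors.As walks the wrap chain; a plain type assertion on err
	// (or pkg/errors' Cause) would not see through %w wrapping
	var tpErr *textproto.Error
	if errors.As(err, &tpErr) {
		fmt.Println("FTP status:", tpErr.Code, tpErr.Msg) // FTP status: 550 file unavailable
	}
}
```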
@@ -400,9 +452,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if err != nil { if err != nil {
return nil, err return nil, err
} }
pass, err := obscure.Reveal(opt.Pass) pass := ""
if err != nil { if opt.AskPassword && opt.Pass == "" {
return nil, errors.Wrap(err, "NewFS decrypt password") pass = config.GetPassword("FTP server password")
} else {
pass, err = obscure.Reveal(opt.Pass)
if err != nil {
return nil, fmt.Errorf("NewFS decrypt password: %w", err)
}
} }
user := opt.User user := opt.User
if user == "" { if user == "" {
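
The new ask_password path above only prompts when no password is stored; otherwise it still runs the stored value through obscure.Reveal, because rclone keeps config passwords obscured (a reversible encoding, not encryption). A sketch of the round trip:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/obscure"
)

func main() {
	// Obscure is what `rclone config` applies before storing a password;
	// Reveal is the inverse applied at connection time.
	stored := obscure.MustObscure("hunter2")
	fmt.Println(obscure.MustReveal(stored)) // hunter2
}
```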
@@ -427,6 +484,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
ServerName: opt.Host, ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert, InsecureSkipVerify: opt.SkipVerifyTLSCert,
} }
if opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
}
if opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
} }
u := protocol + path.Join(dialAddr+"/", root) u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
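
The two new FTP options applied above map directly onto crypto/tls knobs: tls_cache_size enables session resumption, which some FTPS servers require so data connections can reuse the control connection's session, and disable_tls13 caps the protocol version for servers with broken TLS 1.3. A minimal sketch, with a hypothetical helper name:

```go
package main

import "crypto/tls"

// newFTPSConfig is a made-up helper showing the option-to-config mapping.
func newFTPSConfig(host string, cacheSize int, disableTLS13 bool) *tls.Config {
	conf := &tls.Config{ServerName: host}
	if cacheSize > 0 {
		// cache sessions so later connections can resume them
		conf.ClientSessionCache = tls.NewLRUClientSessionCache(cacheSize)
	}
	if disableTLS13 {
		// workaround for servers with buggy TLS 1.3 implementations
		conf.MaxVersion = tls.VersionTLS12
	}
	return conf
}

func main() {
	_ = newFTPSConfig("ftp.example.com", 32, true)
}
```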
@@ -453,7 +516,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
// Make a connection and pool it to return errors early // Make a connection and pool it to return errors early
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection(ctx)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "NewFs") return nil, fmt.Errorf("NewFs: %w", err)
}
f.fGetTime = c.IsGetTimeSupported()
f.fSetTime = c.IsSetTimeSupported()
f.fLstTime = c.IsTimePreciseInList()
if !f.fLstTime && f.fGetTime {
f.features.SlowModTime = true
} }
f.putFtpConnection(&c, nil) f.putFtpConnection(&c, nil)
if root != "" { if root != "" {
@@ -465,7 +534,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
} }
_, err := f.NewObject(ctx, remote) _, err := f.NewObject(ctx, remote)
if err != nil { if err != nil {
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile { if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return old f // File doesn't exist so return old f
f.root = root f.root = root
return f, nil return f, nil
@@ -544,7 +613,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection(ctx)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "findItem") return nil, fmt.Errorf("findItem: %w", err)
} }
files, err := c.List(f.dirFromStandardPath(dir)) files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
@@ -573,13 +642,12 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
fs: f, fs: f,
remote: remote, remote: remote,
} }
info := &FileInfo{ o.info = &FileInfo{
Name: remote, Name: remote,
Size: entry.Size, Size: entry.Size,
ModTime: entry.Time, ModTime: entry.Time,
precise: f.fLstTime,
} }
o.info = info
return o, nil return o, nil
} }
return nil, fs.ErrorObjectNotFound return nil, fs.ErrorObjectNotFound
@@ -589,7 +657,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) { func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, remote) entry, err := f.findItem(ctx, remote)
if err != nil { if err != nil {
return false, errors.Wrap(err, "dirExists") return false, fmt.Errorf("dirExists: %w", err)
} }
if entry != nil && entry.Type == ftp.EntryTypeFolder { if entry != nil && entry.Type == ftp.EntryTypeFolder {
return true, nil return true, nil
@@ -610,7 +678,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err) // defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection(ctx)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "list") return nil, fmt.Errorf("list: %w", err)
} }
var listErr error var listErr error
@@ -648,7 +716,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if len(files) == 0 { if len(files) == 0 {
exists, err := f.dirExists(ctx, dir) exists, err := f.dirExists(ctx, dir)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "list") return nil, fmt.Errorf("list: %w", err)
} }
if !exists { if !exists {
return nil, fs.ErrorDirNotFound return nil, fs.ErrorDirNotFound
@@ -674,6 +742,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
Name: newremote, Name: newremote,
Size: object.Size, Size: object.Size,
ModTime: object.Time, ModTime: object.Time,
precise: f.fLstTime,
} }
o.info = info o.info = info
entries = append(entries, o) entries = append(entries, o)
@@ -687,8 +756,18 @@ func (f *Fs) Hashes() hash.Set {
return 0 return 0
} }
// Precision shows Modified Time not supported // Precision shows whether modified time is supported or not depending on the
// FTP server capabilities, namely whether the FTP server: // FTP server capabilities, namely whether the FTP server:
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) Precision() time.Duration { func (f *Fs) Precision() time.Duration {
if (f.fGetTime || f.fLstTime) && f.fSetTime {
return time.Second
}
return fs.ModTimeNotSupported return fs.ModTimeNotSupported
} }
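
A compact restatement of the Precision logic above as a sketch: the backend only claims one-second precision when the server can both read a file's time and write it; read-only support is not enough for sync to set times.

```go
package main

import (
	"fmt"
	"time"
)

// precision restates the decision in Fs.Precision above: times are usable
// only if the server can read them (MDTM, or precise times in MLSD
// listings) and write them (MFMT, or VsFtpd's two-argument MDTM).
func precision(fGetTime, fLstTime, fSetTime bool) time.Duration {
	if (fGetTime || fLstTime) && fSetTime {
		return time.Second
	}
	return time.Duration(-1) // stand-in for fs.ModTimeNotSupported
}

func main() {
	fmt.Println(precision(true, false, true)) // 1s
	fmt.Println(precision(true, true, false)) // -1ns: read-only times are not enough
}
```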
@@ -701,7 +780,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// fs.Debugf(f, "Trying to put file %s", src.Remote()) // fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(ctx, src.Remote()) err := f.mkParentDir(ctx, src.Remote())
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Put mkParentDir failed") return nil, fmt.Errorf("Put mkParentDir failed: %w", err)
} }
o := &Object{ o := &Object{
fs: f, fs: f,
@@ -724,7 +803,7 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection(ctx)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "getInfo") return nil, fmt.Errorf("getInfo: %w", err)
} }
files, err := c.List(f.dirFromStandardPath(dir)) files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
@@ -740,6 +819,7 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro
Name: remote, Name: remote,
Size: file.Size, Size: file.Size,
ModTime: file.Time, ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder, IsDir: file.Type == ftp.EntryTypeFolder,
} }
return info, nil return info, nil
@@ -761,7 +841,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
} }
return fs.ErrorIsFile return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound { } else if err != fs.ErrorObjectNotFound {
return errors.Wrapf(err, "mkdir %q failed", abspath) return fmt.Errorf("mkdir %q failed: %w", abspath, err)
} }
parent := path.Dir(abspath) parent := path.Dir(abspath)
err = f.mkdir(ctx, parent) err = f.mkdir(ctx, parent)
@@ -770,7 +850,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
} }
c, connErr := f.getFtpConnection(ctx) c, connErr := f.getFtpConnection(ctx)
if connErr != nil { if connErr != nil {
return errors.Wrap(connErr, "mkdir") return fmt.Errorf("mkdir: %w", connErr)
} }
err = c.MakeDir(f.dirFromStandardPath(abspath)) err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
@@ -806,7 +886,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(ctx context.Context, dir string) error {
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection(ctx)
if err != nil { if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir") return fmt.Errorf("Rmdir: %w", translateErrorFile(err))
} }
err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir))) err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
@@ -822,11 +902,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
err := f.mkParentDir(ctx, remote) err := f.mkParentDir(ctx, remote)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Move mkParentDir failed") return nil, fmt.Errorf("Move mkParentDir failed: %w", err)
} }
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection(ctx)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Move") return nil, fmt.Errorf("Move: %w", err)
} }
err = c.Rename( err = c.Rename(
f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)), f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
@@ -834,11 +914,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
) )
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Move Rename failed") return nil, fmt.Errorf("Move Rename failed: %w", err)
} }
dstObj, err := f.NewObject(ctx, remote) dstObj, err := f.NewObject(ctx, remote)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Move NewObject failed") return nil, fmt.Errorf("Move NewObject failed: %w", err)
} }
return dstObj, nil return dstObj, nil
} }
@@ -868,19 +948,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
return fs.ErrorIsFile return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound { } else if err != fs.ErrorObjectNotFound {
return errors.Wrapf(err, "DirMove getInfo failed") return fmt.Errorf("DirMove getInfo failed: %w", err)
} }
// Make sure the parent directory exists // Make sure the parent directory exists
err = f.mkdir(ctx, path.Dir(dstPath)) err = f.mkdir(ctx, path.Dir(dstPath))
if err != nil { if err != nil {
return errors.Wrap(err, "DirMove mkParentDir dst failed") return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
} }
// Do the move // Do the move
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection(ctx)
if err != nil { if err != nil {
return errors.Wrap(err, "DirMove") return fmt.Errorf("DirMove: %w", err)
} }
err = c.Rename( err = c.Rename(
f.dirFromStandardPath(srcPath), f.dirFromStandardPath(srcPath),
@@ -888,7 +968,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
) )
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath) return fmt.Errorf("DirMove Rename(%q,%q) failed: %w", srcPath, dstPath, err)
} }
return nil return nil
} }
@@ -925,12 +1005,41 @@ func (o *Object) Size() int64 {
// ModTime returns the modification time of the object // ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime(ctx context.Context) time.Time {
if !o.info.precise && o.fs.fGetTime {
c, err := o.fs.getFtpConnection(ctx)
if err == nil {
path := path.Join(o.fs.root, o.remote)
path = o.fs.opt.Enc.FromStandardPath(path)
modTime, err := c.GetTime(path)
if err == nil && o.info != nil {
o.info.ModTime = modTime
o.info.precise = true
}
o.fs.putFtpConnection(&c, err)
}
}
return o.info.ModTime return o.info.ModTime
} }
// SetModTime sets the modification time of the object // SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return nil if !o.fs.fSetTime {
fs.Errorf(o.fs, "SetModTime is not supported")
return nil
}
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return err
}
path := path.Join(o.fs.root, o.remote)
path = o.fs.opt.Enc.FromStandardPath(path)
err = c.SetTime(path, modTime.In(time.UTC))
if err == nil && o.info != nil {
o.info.ModTime = modTime
o.info.precise = true
}
o.fs.putFtpConnection(&c, err)
return err
} }
// Storable returns a boolean as to whether this object is storable // Storable returns a boolean as to whether this object is storable
@@ -963,7 +1072,11 @@ func (f *ftpReadCloser) Close() error {
errchan <- f.rc.Close() errchan <- f.rc.Close()
}() }()
// Wait for Close for up to 60 seconds by default // Wait for Close for up to 60 seconds by default
timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout)) closeTimeout := f.f.opt.CloseTimeout
if closeTimeout == 0 {
closeTimeout = fs.DurationOff
}
timer := time.NewTimer(time.Duration(closeTimeout))
select { select {
case err = <-errchan: case err = <-errchan:
timer.Stop() timer.Stop()
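
The hunk above guards against a zero close_timeout by substituting an effectively infinite one. The surrounding pattern, running Close in a goroutine and abandoning the wait after a deadline so a dead data connection cannot hang the caller, is worth seeing in isolation. A self-contained sketch:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"time"
)

// closeWithTimeout sketches the pattern above: Close runs in a goroutine
// and the caller gives up waiting once the timeout fires.
func closeWithTimeout(rc io.Closer, timeout time.Duration) error {
	errchan := make(chan error, 1)
	go func() { errchan <- rc.Close() }()
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case err := <-errchan:
		return err
	case <-timer.C:
		return errors.New("timeout when waiting for connection Close")
	}
}

func main() {
	rc := ioutil.NopCloser(strings.NewReader("data"))
	fmt.Println(closeWithTimeout(rc, time.Second)) // <nil>
}
```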
@@ -1012,12 +1125,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
} }
c, err := o.fs.getFtpConnection(ctx) c, err := o.fs.getFtpConnection(ctx)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "open") return nil, fmt.Errorf("open: %w", err)
} }
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset)) fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil { if err != nil {
o.fs.putFtpConnection(&c, err) o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open") return nil, fmt.Errorf("open: %w", err)
} }
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs} rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil return rc, nil
@@ -1047,19 +1160,33 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
c, err := o.fs.getFtpConnection(ctx) c, err := o.fs.getFtpConnection(ctx)
if err != nil { if err != nil {
return errors.Wrap(err, "Update") return fmt.Errorf("Update: %w", err)
} }
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in) err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - sent by some servers
if err != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
}
}
if err != nil { if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors _ = c.Quit() // toss this connection to avoid sync errors
remove() // recycle connection in advance to let remove() find free token
o.fs.putFtpConnection(nil, err) o.fs.putFtpConnection(nil, err)
return errors.Wrap(err, "update stor") remove()
return fmt.Errorf("update stor: %w", err)
} }
o.fs.putFtpConnection(&c, nil) o.fs.putFtpConnection(&c, nil)
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
return fmt.Errorf("SetModTime: %w", err)
}
o.info, err = o.fs.getInfo(ctx, path) o.info, err = o.fs.getInfo(ctx, path)
if err != nil { if err != nil {
return errors.Wrap(err, "update getinfo") return fmt.Errorf("update getinfo: %w", err)
} }
return nil return nil
} }
@@ -1078,7 +1205,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
} else { } else {
c, err := o.fs.getFtpConnection(ctx) c, err := o.fs.getFtpConnection(ctx)
if err != nil { if err != nil {
return errors.Wrap(err, "Remove") return fmt.Errorf("Remove: %w", err)
} }
err = c.Delete(o.fs.opt.Enc.FromStandardPath(path)) err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
o.fs.putFtpConnection(&c, err) o.fs.putFtpConnection(&c, err)

View File

@@ -0,0 +1,115 @@
package ftp
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type settings map[string]interface{}
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
configMap := configmap.Simple{}
for key, val := range opts {
configMap[key] = fmt.Sprintf("%v", val)
}
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), f.Root())
fixFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)
return fixFs
}
// test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) {
const (
fileSize = 100000000 // 100 MB
idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
)
if testing.Short() {
t.Skip("not running with -short")
}
ctx := context.Background()
ci := fs.GetConfig(ctx)
saveLowLevelRetries := ci.LowLevelRetries
saveTimeout := ci.Timeout
defer func() {
ci.LowLevelRetries = saveLowLevelRetries
ci.Timeout = saveTimeout
}()
ci.LowLevelRetries = 1
ci.Timeout = idleTimeout
upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
fixFs := deriveFs(ctx, t, f, settings{
"concurrency": concurrency,
"shut_timeout": shutTimeout,
})
// Make test object
fileTime := fstest.Time("2020-03-08T09:30:00.000000000Z")
meta := object.NewStaticObjectInfo("upload-timeout.test", fileTime, int64(fileSize), true, nil, nil)
data := readers.NewPatternReader(int64(fileSize))
// Run upload and ensure maximum time
done := make(chan bool)
deadline := time.After(maxTime)
go func() {
obj, err = fixFs.Put(ctx, data, meta)
done <- true
}()
select {
case <-done:
case <-deadline:
t.Fatalf("Upload got stuck for %v !", maxTime)
}
return obj, err
}
// non-zero shut_timeout should fix i/o errors
obj, err := upload(f.opt.Concurrency, time.Second)
assert.NoError(t, err)
assert.NotNil(t, obj)
if obj != nil {
_ = obj.Remove(ctx)
}
}
// rclone must support precise time with ProFtpd and PureFtpd out of the box.
// The VsFtpd server does not support the MFMT command to set file time like
// other servers but by default supports the MDTM command in the non-standard
// two-argument form for the same purpose.
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) testTimePrecision(t *testing.T) {
name := f.Name()
if pos := strings.Index(name, "{"); pos != -1 {
name = name[:pos]
}
switch name {
case "TestFTPProftpd", "TestFTPPureftpd", "TestFTPVsftpd":
assert.LessOrEqual(t, f.Precision(), time.Second)
}
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("UploadTimeout", f.testUploadTimeout)
t.Run("TimePrecision", f.testTimePrecision)
}
var _ fstests.InternalTester = (*Fs)(nil)
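
The deriveFs helper above leans on rclone's connection-string remote syntax, remote,option=value:path, so a test can override backend options for one Fs instance without touching the config file. A sketch, assuming a remote named TestFTPRclone already exists in the config:

```go
package main

import (
	"context"
	"fmt"

	_ "github.com/rclone/rclone/backend/ftp" // register the ftp backend
	"github.com/rclone/rclone/fs"
)

func main() {
	ctx := context.Background()
	// the comma-separated options override the stored config for this
	// instance only; TestFTPRclone must already be configured
	f, err := fs.NewFs(ctx, "TestFTPRclone,concurrency=2,shut_timeout=1s:upload")
	fmt.Println(f, err)
}
```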

View File

@@ -9,25 +9,27 @@ import (
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the rclone FTP server
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPProftpd:",
NilObject: (*ftp.Object)(nil),
})
}
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPRclone:", RemoteName: "TestFTPRclone:",
NilObject: (*ftp.Object)(nil), NilObject: (*ftp.Object)(nil),
}) })
} }
func TestIntegration3(t *testing.T) { // TestIntegrationProftpd runs integration tests against proFTPd
func TestIntegrationProftpd(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPProftpd:",
NilObject: (*ftp.Object)(nil),
})
}
// TestIntegrationPureftpd runs integration tests against pureFTPd
func TestIntegrationPureftpd(t *testing.T) {
if *fstest.RemoteName != "" { if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set") t.Skip("skipping as -remote is set")
} }
@@ -37,12 +39,13 @@ func TestIntegration3(t *testing.T) {
}) })
} }
// func TestIntegration4(t *testing.T) { // TestIntegrationVsftpd runs integration tests against vsFTPd
// if *fstest.RemoteName != "" { func TestIntegrationVsftpd(t *testing.T) {
// t.Skip("skipping as -remote is set") if *fstest.RemoteName != "" {
// } t.Skip("skipping as -remote is set")
// fstests.Run(t, &fstests.Opt{ }
// RemoteName: "TestFTPVsftpd:", fstests.Run(t, &fstests.Opt{
// NilObject: (*ftp.Object)(nil), RemoteName: "TestFTPVsftpd:",
// }) NilObject: (*ftp.Object)(nil),
// } })
}

View File

@@ -16,6 +16,7 @@ import (
"context" "context"
"encoding/base64" "encoding/base64"
"encoding/hex" "encoding/hex"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@@ -25,7 +26,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
@@ -89,58 +89,58 @@ func init() {
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number", Name: "project_number",
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.", Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, { }, {
Name: "service_account_file", Name: "service_account_file",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp, Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, { }, {
Name: "service_account_credentials", Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
}, { }, {
Name: "anonymous", Name: "anonymous",
Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.", Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
Default: false, Default: false,
}, { }, {
Name: "object_acl", Name: "object_acl",
Help: "Access Control List for new objects.", Help: "Access Control List for new objects.",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "authenticatedRead", Value: "authenticatedRead",
Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.", Help: "Object owner gets OWNER access.\nAll Authenticated Users get READER access.",
}, { }, {
Value: "bucketOwnerFullControl", Value: "bucketOwnerFullControl",
Help: "Object owner gets OWNER access, and project team owners get OWNER access.", Help: "Object owner gets OWNER access.\nProject team owners get OWNER access.",
}, { }, {
Value: "bucketOwnerRead", Value: "bucketOwnerRead",
Help: "Object owner gets OWNER access, and project team owners get READER access.", Help: "Object owner gets OWNER access.\nProject team owners get READER access.",
}, { }, {
Value: "private", Value: "private",
Help: "Object owner gets OWNER access [default if left blank].", Help: "Object owner gets OWNER access.\nDefault if left blank.",
}, { }, {
Value: "projectPrivate", Value: "projectPrivate",
Help: "Object owner gets OWNER access, and project team members get access according to their roles.", Help: "Object owner gets OWNER access.\nProject team members get access according to their roles.",
}, { }, {
Value: "publicRead", Value: "publicRead",
Help: "Object owner gets OWNER access, and all Users get READER access.", Help: "Object owner gets OWNER access.\nAll Users get READER access.",
}}, }},
}, { }, {
Name: "bucket_acl", Name: "bucket_acl",
Help: "Access Control List for new buckets.", Help: "Access Control List for new buckets.",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "authenticatedRead", Value: "authenticatedRead",
Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.", Help: "Project team owners get OWNER access.\nAll Authenticated Users get READER access.",
}, { }, {
Value: "private", Value: "private",
Help: "Project team owners get OWNER access [default if left blank].", Help: "Project team owners get OWNER access.\nDefault if left blank.",
}, { }, {
Value: "projectPrivate", Value: "projectPrivate",
Help: "Project team members get access according to their roles.", Help: "Project team members get access according to their roles.",
}, { }, {
Value: "publicRead", Value: "publicRead",
Help: "Project team owners get OWNER access, and all Users get READER access.", Help: "Project team owners get OWNER access.\nAll Users get READER access.",
}, { }, {
Value: "publicReadWrite", Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.", Help: "Project team owners get OWNER access.\nAll Users get WRITER access.",
}}, }},
}, { }, {
Name: "bucket_policy_only", Name: "bucket_policy_only",
@@ -163,64 +163,64 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Help: "Location for the newly created buckets.", Help: "Location for the newly created buckets.",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "", Value: "",
Help: "Empty for default location (US).", Help: "Empty for default location (US)",
}, { }, {
Value: "asia", Value: "asia",
Help: "Multi-regional location for Asia.", Help: "Multi-regional location for Asia",
}, { }, {
Value: "eu", Value: "eu",
Help: "Multi-regional location for Europe.", Help: "Multi-regional location for Europe",
}, { }, {
Value: "us", Value: "us",
Help: "Multi-regional location for United States.", Help: "Multi-regional location for United States",
}, { }, {
Value: "asia-east1", Value: "asia-east1",
Help: "Taiwan.", Help: "Taiwan",
}, { }, {
Value: "asia-east2", Value: "asia-east2",
Help: "Hong Kong.", Help: "Hong Kong",
}, { }, {
Value: "asia-northeast1", Value: "asia-northeast1",
Help: "Tokyo.", Help: "Tokyo",
}, { }, {
Value: "asia-south1", Value: "asia-south1",
Help: "Mumbai.", Help: "Mumbai",
}, { }, {
Value: "asia-southeast1", Value: "asia-southeast1",
Help: "Singapore.", Help: "Singapore",
}, { }, {
Value: "australia-southeast1", Value: "australia-southeast1",
Help: "Sydney.", Help: "Sydney",
}, { }, {
Value: "europe-north1", Value: "europe-north1",
Help: "Finland.", Help: "Finland",
}, { }, {
Value: "europe-west1", Value: "europe-west1",
Help: "Belgium.", Help: "Belgium",
}, { }, {
Value: "europe-west2", Value: "europe-west2",
Help: "London.", Help: "London",
}, { }, {
Value: "europe-west3", Value: "europe-west3",
Help: "Frankfurt.", Help: "Frankfurt",
}, { }, {
Value: "europe-west4", Value: "europe-west4",
Help: "Netherlands.", Help: "Netherlands",
}, { }, {
Value: "us-central1", Value: "us-central1",
Help: "Iowa.", Help: "Iowa",
}, { }, {
Value: "us-east1", Value: "us-east1",
Help: "South Carolina.", Help: "South Carolina",
}, { }, {
Value: "us-east4", Value: "us-east4",
Help: "Northern Virginia.", Help: "Northern Virginia",
}, { }, {
Value: "us-west1", Value: "us-west1",
Help: "Oregon.", Help: "Oregon",
}, { }, {
Value: "us-west2", Value: "us-west2",
Help: "California.", Help: "California",
}}, }},
}, { }, {
Name: "storage_class", Name: "storage_class",
@@ -375,7 +375,7 @@ func (o *Object) split() (bucket, bucketPath string) {
func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) { func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...) conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "error processing credentials") return nil, fmt.Errorf("error processing credentials: %w", err)
} }
ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx)) ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
@@ -408,7 +408,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" { if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile)) loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil { if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file") return nil, fmt.Errorf("error opening service account credentials file: %w", err)
} }
opt.ServiceAccountCredentials = string(loadedCreds) opt.ServiceAccountCredentials = string(loadedCreds)
} }
@@ -417,7 +417,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} else if opt.ServiceAccountCredentials != "" { } else if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials)) oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account") return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
} }
} else { } else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig) oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
@@ -425,7 +425,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ctx := context.Background() ctx := context.Background()
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope) oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage") return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
} }
} }
} }
@@ -449,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.client = oAuthClient f.client = oAuthClient
f.svc, err = storage.New(f.client) f.svc, err = storage.New(f.client)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client") return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
} }
if f.rootBucket != "" && f.rootDirectory != "" { if f.rootBucket != "" && f.rootDirectory != "" {
@@ -759,10 +759,10 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
return nil return nil
} else if gErr, ok := err.(*googleapi.Error); ok { } else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound { if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket") return fmt.Errorf("failed to get bucket: %w", err)
} }
} else { } else {
return errors.Wrap(err, "failed to get bucket") return fmt.Errorf("failed to get bucket: %w", err)
} }
if f.opt.ProjectNumber == "" { if f.opt.ProjectNumber == "" {
@@ -1065,7 +1065,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
_, isRanging := req.Header["Range"] _, isRanging := req.Header["Range"]
if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) { if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
_ = res.Body.Close() // ignore error _ = res.Body.Close() // ignore error
return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status) return nil, fmt.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
} }
return res.Body, nil return res.Body, nil
} }
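All of the replacements in this file follow the repo-wide pkg/errors removal: errors.Wrap(err, msg) becomes fmt.Errorf("msg: %w", err), and errors.Errorf becomes a plain fmt.Errorf. A minimal standalone sketch of why the %w verb matters to callers (names below are illustrative, not from this backend):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

func getBucket() error {
	// was: errors.Wrap(errNotFound, "failed to get bucket")
	return fmt.Errorf("failed to get bucket: %w", errNotFound)
}

func main() {
	err := getBucket()
	fmt.Println(err)                         // failed to get bucket: not found
	fmt.Println(errors.Is(err, errNotFound)) // true: %w keeps the chain intact
}

Unlike a plain %v, wrapping with %w lets errors.Is and errors.As keep working after the message is annotated, so the migration preserves the error-matching behaviour that pkg/errors used to provide.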


@@ -6,6 +6,7 @@ package googlephotos
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
@@ -17,9 +18,9 @@ import (
"sync" "sync"
"time" "time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api" "github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
@@ -28,6 +29,7 @@ import (
"github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
@@ -53,6 +55,7 @@ const (
minSleep = 10 * time.Millisecond minSleep = 10 * time.Millisecond
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly" scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary" scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
scopeAccess = 2 // position of access scope in list
) )
var ( var (
@@ -61,7 +64,7 @@ var (
Scopes: []string{ Scopes: []string{
"openid", "openid",
"profile", "profile",
scopeReadWrite, scopeReadWrite, // this must be at position scopeAccess
}, },
Endpoint: google.Endpoint, Endpoint: google.Endpoint,
ClientID: rcloneClientID, ClientID: rcloneClientID,
@@ -82,16 +85,16 @@ func init() {
opt := new(Options) opt := new(Options)
err := configstruct.Set(m, opt) err := configstruct.Set(m, opt)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't parse config into struct") return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
} }
switch config.State { switch config.State {
case "": case "":
// Fill in the scopes // Fill in the scopes
if opt.ReadOnly { if opt.ReadOnly {
oauthConfig.Scopes[0] = scopeReadOnly oauthConfig.Scopes[scopeAccess] = scopeReadOnly
} else { } else {
oauthConfig.Scopes[0] = scopeReadWrite oauthConfig.Scopes[scopeAccess] = scopeReadWrite
} }
return oauthutil.ConfigOut("warning", &oauthutil.Options{ return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig, OAuth2Config: oauthConfig,
@@ -129,14 +132,14 @@ you want to read the media.`,
}, { }, {
Name: "start_year", Name: "start_year",
Default: 2000, Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`, Help: `Year limits the photos to be downloaded to those which are uploaded after the given year.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "include_archived", Name: "include_archived",
Default: false, Default: false,
Help: `Also view and download archived media. Help: `Also view and download archived media.
By default rclone does not request archived media. Thus, when syncing, By default, rclone does not request archived media. Thus, when syncing,
archived media is not visible in directory listings or transferred. archived media is not visible in directory listings or transferred.
Note that media in albums is always visible and synced, no matter Note that media in albums is always visible and synced, no matter
@@ -148,16 +151,24 @@ listings and transferred.
Without this flag, archived media will not be visible in directory Without this flag, archived media will not be visible in directory
listings and won't be transferred.`, listings and won't be transferred.`,
Advanced: true, Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}}...), }}...),
}) })
} }
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
ReadOnly bool `config:"read_only"` ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"` ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"` StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"` IncludeArchived bool `config:"include_archived"`
Enc encoder.MultiEncoder `config:"encoding"`
} }
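The new encoding option above lands in Options.Enc as an encoder.MultiEncoder. A minimal sketch of what the default mask does to an album title that could not otherwise appear in an rclone path (assuming the lib/encoder API as used in this diff; the title is made up):

package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

func main() {
	enc := encoder.Base | encoder.EncodeCrLf | encoder.EncodeInvalidUtf8
	title := "Trip\r\n2021" // raw CR/LF is not representable in a path
	// FromStandardPath is the same call applied to album titles in listAlbums
	fmt.Printf("%q -> %q\n", title, enc.FromStandardPath(title))
}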
// Fs represents a remote storage server // Fs represents a remote storage server
@@ -281,7 +292,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
baseClient := fshttp.NewClient(ctx) baseClient := fshttp.NewClient(ctx)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient) oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to configure Box") return nil, fmt.Errorf("failed to configure Box: %w", err)
} }
root = strings.Trim(path.Clean(root), "/") root = strings.Trim(path.Clean(root), "/")
@@ -334,13 +345,13 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return "", errors.Wrap(err, "couldn't read openID config") return "", fmt.Errorf("couldn't read openID config: %w", err)
} }
// Find userinfo endpoint // Find userinfo endpoint
endpoint, ok := openIDconfig[name].(string) endpoint, ok := openIDconfig[name].(string)
if !ok { if !ok {
return "", errors.Errorf("couldn't find %q from openID config", name) return "", fmt.Errorf("couldn't find %q from openID config", name)
} }
return endpoint, nil return endpoint, nil
@@ -363,7 +374,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't read user info") return nil, fmt.Errorf("couldn't read user info: %w", err)
} }
return userInfo, nil return userInfo, nil
} }
@@ -394,7 +405,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't revoke token") return fmt.Errorf("couldn't revoke token: %w", err)
} }
fs.Infof(f, "res = %+v", res) fs.Infof(f, "res = %+v", res)
return nil return nil
@@ -481,7 +492,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't list albums") return nil, fmt.Errorf("couldn't list albums: %w", err)
} }
newAlbums := result.Albums newAlbums := result.Albums
if shared { if shared {
@@ -495,7 +506,9 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
lastID = newAlbums[len(newAlbums)-1].ID lastID = newAlbums[len(newAlbums)-1].ID
} }
for i := range newAlbums { for i := range newAlbums {
all.add(&newAlbums[i]) anAlbum := newAlbums[i]
anAlbum.Title = f.opt.Enc.FromStandardPath(anAlbum.Title)
all.add(&anAlbum)
} }
if result.NextPageToken == "" { if result.NextPageToken == "" {
break break
@@ -536,7 +549,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't list files") return fmt.Errorf("couldn't list files: %w", err)
} }
items := result.MediaItems items := result.MediaItems
if len(items) > 0 && items[0].ID == lastID { if len(items) > 0 && items[0].ID == lastID {
@@ -680,7 +693,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't create album") return nil, fmt.Errorf("couldn't create album: %w", err)
} }
f.albums[false].add(&result) f.albums[false].add(&result)
return &result, nil return &result, nil
@@ -866,7 +879,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't get media item") return fmt.Errorf("couldn't get media item: %w", err)
} }
o.setMetaData(&item) o.setMetaData(&item)
return nil return nil
@@ -1001,7 +1014,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't upload file") return fmt.Errorf("couldn't upload file: %w", err)
} }
uploadToken := strings.TrimSpace(string(token)) uploadToken := strings.TrimSpace(string(token))
if uploadToken == "" { if uploadToken == "" {
@@ -1029,14 +1042,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "failed to create media item") return fmt.Errorf("failed to create media item: %w", err)
} }
if len(result.NewMediaItemResults) != 1 { if len(result.NewMediaItemResults) != 1 {
return errors.New("bad response to BatchCreate wrong number of items") return errors.New("bad response to BatchCreate wrong number of items")
} }
mediaItemResult := result.NewMediaItemResults[0] mediaItemResult := result.NewMediaItemResults[0]
if mediaItemResult.Status.Code != 0 { if mediaItemResult.Status.Code != 0 {
return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code) return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
} }
o.setMetaData(&mediaItemResult.MediaItem) o.setMetaData(&mediaItemResult.MediaItem)
@@ -1058,7 +1071,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
albumTitle, fileName := match[1], match[2] albumTitle, fileName := match[1], match[2]
album, ok := o.fs.albums[false].get(albumTitle) album, ok := o.fs.albums[false].get(albumTitle)
if !ok { if !ok {
return errors.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle) return fmt.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
} }
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
@@ -1074,7 +1087,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't delete item from album") return fmt.Errorf("couldn't delete item from album: %w", err)
} }
return nil return nil
} }


@@ -11,7 +11,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api" "github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
) )
@@ -270,7 +269,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
year := match[1] year := match[1]
current, err := time.Parse("2006", year) current, err := time.Parse("2006", year)
if err != nil { if err != nil {
return nil, errors.Errorf("bad year %q", match[1]) return nil, fmt.Errorf("bad year %q", match[1])
} }
currentYear := current.Year() currentYear := current.Year()
for current.Year() == currentYear { for current.Year() == currentYear {
@@ -284,7 +283,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) { func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
year, err := strconv.Atoi(match[1]) year, err := strconv.Atoi(match[1])
if err != nil || year < 1000 || year > 3000 { if err != nil || year < 1000 || year > 3000 {
return sf, errors.Errorf("bad year %q", match[1]) return sf, fmt.Errorf("bad year %q", match[1])
} }
sf = api.SearchFilter{ sf = api.SearchFilter{
Filters: &api.Filters{ Filters: &api.Filters{
@@ -300,14 +299,14 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
if len(match) >= 3 { if len(match) >= 3 {
month, err := strconv.Atoi(match[2]) month, err := strconv.Atoi(match[2])
if err != nil || month < 1 || month > 12 { if err != nil || month < 1 || month > 12 {
return sf, errors.Errorf("bad month %q", match[2]) return sf, fmt.Errorf("bad month %q", match[2])
} }
sf.Filters.DateFilter.Dates[0].Month = month sf.Filters.DateFilter.Dates[0].Month = month
} }
if len(match) >= 4 { if len(match) >= 4 {
day, err := strconv.Atoi(match[3]) day, err := strconv.Atoi(match[3])
if err != nil || day < 1 || day > 31 { if err != nil || day < 1 || day > 31 {
return sf, errors.Errorf("bad day %q", match[3]) return sf, fmt.Errorf("bad day %q", match[3])
} }
sf.Filters.DateFilter.Dates[0].Day = day sf.Filters.DateFilter.Dates[0].Day = day
} }
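The bounds checks above translate directly into plain validation code; a quick standalone sketch mirroring the year check in yearMonthDayFilter (not the backend's actual helper):

package main

import (
	"fmt"
	"strconv"
)

func validYear(s string) (int, error) {
	year, err := strconv.Atoi(s)
	if err != nil || year < 1000 || year > 3000 {
		return 0, fmt.Errorf("bad year %q", s)
	}
	return year, nil
}

func main() {
	for _, s := range []string{"2021", "999", "oops"} {
		fmt.Println(validYear(s))
	}
}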

backend/hasher/commands.go (new file, 180 lines)

@@ -0,0 +1,180 @@
package hasher
import (
"context"
"errors"
"fmt"
"path"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/kv"
)
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "drop":
return nil, f.db.Stop(true)
case "dump", "fulldump":
return nil, f.dbDump(ctx, name == "fulldump", "")
case "import", "stickyimport":
sticky := name == "stickyimport"
if len(arg) != 2 {
return nil, errors.New("please provide checksum type and path to sum file")
}
return nil, f.dbImport(ctx, arg[0], arg[1], sticky)
default:
return nil, fs.ErrorCommandNotFound
}
}
var commandHelp = []fs.CommandHelp{{
Name: "drop",
Short: "Drop cache",
Long: `Completely drop checksum cache.
Usage Example:
rclone backend drop hasher:
`,
}, {
Name: "dump",
Short: "Dump the database",
Long: "Dump cache records covered by the current remote",
}, {
Name: "fulldump",
Short: "Full dump of the database",
Long: "Dump all cache records in the database",
}, {
Name: "import",
Short: "Import a SUM file",
Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
Usage Example:
rclone backend import hasher:subdir md5 /path/to/sum.md5
`,
}, {
Name: "stickyimport",
Short: "Perform fast import of a SUM file",
Long: `Fill hash cache from a SUM file without verifying file fingerprints.
Usage Example:
rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
`,
}}
func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
if root == "" {
remoteFs, err := cache.Get(ctx, f.opt.Remote)
if err != nil {
return err
}
root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
}
op := &kvDump{
full: full,
root: root,
path: f.db.Path(),
fs: f,
}
err := f.db.Do(false, op)
if err == kv.ErrEmpty {
fs.Infof(op.path, "empty")
err = nil
}
return err
}
func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bool) error {
var hashType hash.Type
if err := hashType.Set(hashName); err != nil {
return err
}
if hashType == hash.None {
return errors.New("please provide a valid hash type")
}
if !f.suppHashes.Contains(hashType) {
return errors.New("unsupported hash type")
}
if !f.keepHashes.Contains(hashType) {
fs.Infof(nil, "Need not import hashes of this type")
return nil
}
_, sumPath, err := fspath.SplitFs(sumRemote)
if err != nil {
return err
}
sumFs, err := cache.Get(ctx, sumRemote)
switch err {
case fs.ErrorIsFile:
// ok
case nil:
return fmt.Errorf("not a file: %s", sumRemote)
default:
return err
}
sumObj, err := sumFs.NewObject(ctx, path.Base(sumPath))
if err != nil {
return fmt.Errorf("cannot open sum file: %w", err)
}
hashes, err := operations.ParseSumFile(ctx, sumObj)
if err != nil {
return fmt.Errorf("failed to parse sum file: %w", err)
}
if sticky {
rootPath := f.Fs.Root()
for remote, hashVal := range hashes {
key := path.Join(rootPath, remote)
hashSums := operations.HashSums{hashName: hashVal}
if err := f.putRawHashes(ctx, key, anyFingerprint, hashSums); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
}
}
fs.Infof(nil, "Summary: %d checksum(s) imported", len(hashes))
return nil
}
const longImportThreshold = 100
if len(hashes) > longImportThreshold {
fs.Infof(nil, "Importing %d checksums. Please wait...", len(hashes))
}
doneCount := 0
err = operations.ListFn(ctx, f, func(obj fs.Object) {
remote := obj.Remote()
hash := hashes[remote]
hashes[remote] = "" // mark as handled
o, ok := obj.(*Object)
if ok && hash != "" {
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
}
accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
doneCount++
}
})
if err != nil {
fs.Errorf(nil, "Import failed: %v", err)
}
skipCount := 0
for remote, emptyOrDone := range hashes {
if emptyOrDone != "" {
fs.Infof(nil, "Skip vanished object: %s", remote)
skipCount++
}
}
fs.Infof(nil, "Summary: %d imported, %d skipped", doneCount, skipCount)
return err
}
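dbImport feeds ParseSumFile, so the import commands expect the conventional md5sum-style layout, one digest/path pair per line; for example (digests and paths made up):

  d41d8cd98f00b204e9800998ecf8427e  photos/empty.bin
  0cc175b9c0f1b6a831c399e269772661  photos/a.txt

One way to produce such a file is rclone itself, e.g. rclone hashsum md5 remote:subdir > sum.md5, then load it with rclone backend import hasher:subdir md5 sum.md5 as in the command help above.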

backend/hasher/hasher.go (new file, 508 lines)

@@ -0,0 +1,508 @@
// Package hasher implements a checksum handling overlay backend
package hasher
import (
"context"
"encoding/gob"
"errors"
"fmt"
"io"
"path"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/kv"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "hasher",
Description: "Better checksums for other remotes",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Required: true,
Help: "Remote to cache checksums for (e.g. myRemote:path).",
}, {
Name: "hashes",
Default: fs.CommaSepList{"md5", "sha1"},
Advanced: false,
Help: "Comma separated list of supported checksum types.",
}, {
Name: "max_age",
Advanced: false,
Default: fs.DurationOff,
Help: "Maximum time to keep checksums in cache (0 = no cache, off = cache forever).",
}, {
Name: "auto_size",
Advanced: true,
Default: fs.SizeSuffix(0),
Help: "Auto-update checksum for files smaller than this size (disabled by default).",
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
Hashes fs.CommaSepList `config:"hashes"`
AutoSize fs.SizeSuffix `config:"auto_size"`
MaxAge fs.Duration `config:"max_age"`
}
// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
name string
root string
wrapper fs.Fs
features *fs.Features
opt *Options
db *kv.DB
// fingerprinting
fpTime bool // true if using time in fingerprints
fpHash hash.Type // hash type to use in fingerprints or None
// hash types triaged by groups
suppHashes hash.Set // all supported checksum types
passHashes hash.Set // passed directly to the base without caching
slowHashes hash.Set // passed to the base and then cached
autoHashes hash.Set // calculated in-house and cached
keepHashes hash.Set // checksums to keep in cache (slow + auto)
}
var warnExperimental sync.Once
// NewFs constructs an Fs from the remote:path string
func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs.Fs, error) {
if !kv.Supported() {
return nil, errors.New("hasher is not supported on this OS")
}
warnExperimental.Do(func() {
fs.Infof(nil, "Hasher is EXPERIMENTAL!")
})
opt := &Options{}
err := configstruct.Set(cmap, opt)
if err != nil {
return nil, err
}
if strings.HasPrefix(opt.Remote, fsname+":") {
return nil, errors.New("can't point remote at itself")
}
remotePath := fspath.JoinRootPath(opt.Remote, rpath)
baseFs, err := cache.Get(ctx, remotePath)
if err != nil && err != fs.ErrorIsFile {
return nil, fmt.Errorf("failed to derive base remote %q: %w", opt.Remote, err)
}
f := &Fs{
Fs: baseFs,
name: fsname,
root: rpath,
opt: opt,
}
baseFeatures := baseFs.Features()
f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported
if baseFeatures.SlowHash {
f.slowHashes = f.Fs.Hashes()
} else {
f.passHashes = f.Fs.Hashes()
f.fpHash = f.passHashes.GetOne()
}
f.suppHashes = f.passHashes
f.suppHashes.Add(f.slowHashes.Array()...)
for _, hashName := range opt.Hashes {
var ht hash.Type
if err := ht.Set(hashName); err != nil {
return nil, fmt.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
}
if !f.slowHashes.Contains(ht) {
f.autoHashes.Add(ht)
}
f.keepHashes.Add(ht)
f.suppHashes.Add(ht)
}
fs.Debugf(f, "Groups by usage: cached %s, passed %s, auto %s, slow %s, supported %s",
f.keepHashes, f.passHashes, f.autoHashes, f.slowHashes, f.suppHashes)
var nilSet hash.Set
if f.keepHashes == nilSet {
return nil, errors.New("configured hash_names have nothing to keep in cache")
}
if f.opt.MaxAge > 0 {
gob.Register(hashRecord{})
db, err := kv.Start(ctx, "hasher", f.Fs)
if err != nil {
return nil, err
}
f.db = db
}
stubFeatures := &fs.Features{
CanHaveEmptyDirectories: true,
IsLocal: true,
ReadMimeType: true,
WriteMimeType: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
cache.PinUntilFinalized(f.Fs, f)
return f, err
}
//
// Filesystem
//
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { return f.suppHashes }
// String returns a description of the FS
// The "hasher::" prefix is a distinctive feature.
func (f *Fs) String() string {
return fmt.Sprintf("hasher::%s:%s", f.name, f.root)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs { return f.Fs }
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs { return f.wrapper }
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper }
// Wrap base entries into hasher entries.
func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries, err error) {
hashEntries = baseEntries[:0] // work inplace
for _, entry := range baseEntries {
switch x := entry.(type) {
case fs.Object:
hashEntries = append(hashEntries, f.wrapObject(x, nil))
default:
hashEntries = append(hashEntries, entry) // trash in - trash out
}
}
return hashEntries, nil
}
// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if entries, err = f.Fs.List(ctx, dir); err != nil {
return nil, err
}
return f.wrapEntries(entries)
}
// ListR lists the objects and directories recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
return f.Fs.Features().ListR(ctx, dir, func(baseEntries fs.DirEntries) error {
hashEntries, err := f.wrapEntries(baseEntries)
if err != nil {
return err
}
return callback(hashEntries)
})
}
// Purge a directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
if do := f.Fs.Features().Purge; do != nil {
if err := do(ctx, dir); err != nil {
return err
}
err := f.db.Do(true, &kvPurge{
dir: path.Join(f.Fs.Root(), dir),
})
if err != nil {
fs.Errorf(f, "Failed to purge some hashes: %v", err)
}
return nil
}
return fs.ErrorCantPurge
}
// PutStream uploads to the remote path with indeterminate size.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if do := f.Fs.Features().PutStream; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err), err
}
return nil, errors.New("PutStream not supported")
}
// PutUnchecked uploads the object, allowing duplicates.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if do := f.Fs.Features().PutUnchecked; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err), err
}
return nil, errors.New("PutUnchecked not supported")
}
// pruneHash deletes hash for a path
func (f *Fs) pruneHash(remote string) error {
return f.db.Do(true, &kvPrune{
key: path.Join(f.Fs.Root(), remote),
})
}
// CleanUp the trash in the Fs
func (f *Fs) CleanUp(ctx context.Context) error {
if do := f.Fs.Features().CleanUp; do != nil {
return do(ctx)
}
return errors.New("CleanUp not supported")
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if do := f.Fs.Features().About; do != nil {
return do(ctx)
}
return nil, errors.New("About not supported")
}
// ChangeNotify calls the passed function with a path that has had changes.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
if do := f.Fs.Features().ChangeNotify; do != nil {
do(ctx, notifyFunc, pollIntervalChan)
}
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
if do := f.Fs.Features().UserInfo; do != nil {
return do(ctx)
}
return nil, fs.ErrorNotImplemented
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
if do := f.Fs.Features().Disconnect; do != nil {
return do(ctx)
}
return fs.ErrorNotImplemented
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if do := f.Fs.Features().MergeDirs; do != nil {
return do(ctx, dirs)
}
return errors.New("MergeDirs not supported")
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
if do := f.Fs.Features().DirCacheFlush; do != nil {
do()
}
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
if do := f.Fs.Features().PublicLink; do != nil {
return do(ctx, remote, expire, unlink)
}
return "", errors.New("PublicLink not supported")
}
// Copy src to this remote using server-side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
o, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantCopy
}
oResult, err := do(ctx, o.Object, remote)
return f.wrapObject(oResult, err), err
}
// Move src to this remote using server-side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
}
o, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantMove
}
oResult, err := do(ctx, o.Object, remote)
if err != nil {
return nil, err
}
_ = f.db.Do(true, &kvMove{
src: path.Join(f.Fs.Root(), src.Remote()),
dst: path.Join(f.Fs.Root(), remote),
dir: false,
fs: f,
})
return f.wrapObject(oResult, nil), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
do := f.Fs.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
srcFs, ok := src.(*Fs)
if !ok {
return fs.ErrorCantDirMove
}
err := do(ctx, srcFs.Fs, srcRemote, dstRemote)
if err == nil {
_ = f.db.Do(true, &kvMove{
src: path.Join(srcFs.Fs.Root(), srcRemote),
dst: path.Join(f.Fs.Root(), dstRemote),
dir: true,
fs: f,
})
}
return err
}
// Shutdown the backend, closing any background tasks and any cached connections.
func (f *Fs) Shutdown(ctx context.Context) (err error) {
err = f.db.Stop(false)
if do := f.Fs.Features().Shutdown; do != nil {
if err2 := do(ctx); err2 != nil {
err = err2
}
}
return
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(ctx, remote)
return f.wrapObject(o, err), err
}
//
// Object
//
// Object represents a composite file wrapping one or more data chunks
type Object struct {
fs.Object
f *Fs
}
// Wrap base object into hasher object
func (f *Fs) wrapObject(o fs.Object, err error) *Object {
if err != nil || o == nil {
return nil
}
return &Object{Object: o, f: f}
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info { return o.f }
// UnWrap returns the wrapped Object
func (o *Object) UnWrap() fs.Object { return o.Object }
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Object.String()
}
// ID returns the ID of the Object if possible
func (o *Object) ID() string {
if doer, ok := o.Object.(fs.IDer); ok {
return doer.ID()
}
return ""
}
// GetTier returns the Tier of the Object if possible
func (o *Object) GetTier() string {
if doer, ok := o.Object.(fs.GetTierer); ok {
return doer.GetTier()
}
return ""
}
// SetTier set the Tier of the Object if possible
func (o *Object) SetTier(tier string) error {
if doer, ok := o.Object.(fs.SetTierer); ok {
return doer.SetTier(tier)
}
return errors.New("SetTier not supported")
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
if doer, ok := o.Object.(fs.MimeTyper); ok {
return doer.MimeType(ctx)
}
return ""
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)
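Putting the options together: a hasher remote always wraps an existing remote, so a minimal rclone.conf sketch looks like this (remote name and wrapped path are placeholders):

  [hasher-demo]
  type = hasher
  remote = myRemote:path
  hashes = md5,sha1
  max_age = off

Per the option help above, max_age = off caches checksums forever and max_age = 0 disables the cache entirely; NewFs only starts the key-value database when MaxAge is positive.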


@@ -0,0 +1,78 @@
package hasher
import (
"context"
"fmt"
"os"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/kv"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: name, ModTime: mtime1}
_, o := fstests.PutTestContents(ctx, t, f, &item, data, true)
require.NotNil(t, o)
return o
}
func (f *Fs) testUploadFromCrypt(t *testing.T) {
// make a temporary local remote
tempRoot, err := fstest.LocalRemote()
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempRoot)
}()
// make a temporary crypt remote
ctx := context.Background()
pass := obscure.MustObscure("crypt")
remote := fmt.Sprintf(":crypt,remote=%s,password=%s:", tempRoot, pass)
cryptFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)
// make a test file on the crypt remote
const dirName = "from_crypt_1"
const fileName = dirName + "/file_from_crypt_1"
const longTime = fs.ModTimeNotSupported
src := putFile(ctx, t, cryptFs, fileName, "doggy froggy")
// ensure that hash does not exist yet
_ = f.pruneHash(fileName)
hashType := f.keepHashes.GetOne()
hash, err := f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
assert.Error(t, err)
assert.Empty(t, hash)
// upload file to hasher
in, err := src.Open(ctx)
require.NoError(t, err)
dst, err := f.Put(ctx, in, src)
require.NoError(t, err)
assert.NotNil(t, dst)
// check that hash was created
hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
assert.NoError(t, err)
assert.NotEmpty(t, hash)
//t.Logf("hash is %q", hash)
_ = operations.Purge(ctx, f, dirName)
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
if !kv.Supported() {
t.Skip("hasher is not supported on this OS")
}
t.Run("UploadFromCrypt", f.testUploadFromCrypt)
}
var _ fstests.InternalTester = (*Fs)(nil)
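Note the test builds its crypt remote with an on-the-fly connection string rather than a saved config; the same :backend,key=value: syntax works anywhere a remote is accepted, e.g. (path and obscured password are placeholders): rclone lsf ':crypt,remote=/tmp/enc,password=OBSCURED:'.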


@@ -0,0 +1,39 @@
package hasher_test
import (
"os"
"path/filepath"
"testing"
"github.com/rclone/rclone/backend/hasher"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/kv"
_ "github.com/rclone/rclone/backend/all" // for integration tests
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if !kv.Supported() {
t.Skip("hasher is not supported on this OS")
}
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*hasher.Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
},
UnimplementableObjectMethods: []string{},
QuickTestOK: true,
}
if *fstest.RemoteName == "" {
tempDir := filepath.Join(os.TempDir(), "rclone-hasher-test")
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: "TestHasher", Key: "type", Value: "hasher"},
{Name: "TestHasher", Key: "remote", Value: tempDir},
}
opt.RemoteName = "TestHasher:"
}
fstests.Run(t, &opt)
}

backend/hasher/kv.go (new file, 315 lines)

@@ -0,0 +1,315 @@
package hasher
import (
"bytes"
"context"
"encoding/gob"
"errors"
"fmt"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/kv"
)
const (
timeFormat = "2006-01-02T15:04:05.000000000-0700"
anyFingerprint = "*"
)
type hashMap map[hash.Type]string
type hashRecord struct {
Fp string // fingerprint
Hashes operations.HashSums
Created time.Time
}
func (r *hashRecord) encode(key string) ([]byte, error) {
var buf bytes.Buffer
if err := gob.NewEncoder(&buf).Encode(r); err != nil {
fs.Debugf(key, "hasher encoding %v: %v", r, err)
return nil, err
}
return buf.Bytes(), nil
}
func (r *hashRecord) decode(key string, data []byte) error {
if err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(r); err != nil {
fs.Debugf(key, "hasher decoding %q failed: %v", data, err)
return err
}
return nil
}
// kvPrune: prune a single hash
type kvPrune struct {
key string
}
func (op *kvPrune) Do(ctx context.Context, b kv.Bucket) error {
return b.Delete([]byte(op.key))
}
// kvPurge: delete a subtree
type kvPurge struct {
dir string
}
func (op *kvPurge) Do(ctx context.Context, b kv.Bucket) error {
dir := op.dir
if !strings.HasSuffix(dir, "/") {
dir += "/"
}
var items []string
cur := b.Cursor()
bkey, _ := cur.Seek([]byte(dir))
for bkey != nil {
key := string(bkey)
if !strings.HasPrefix(key, dir) {
break
}
items = append(items, key[len(dir):])
bkey, _ = cur.Next()
}
nerr := 0
for _, sub := range items {
if err := b.Delete([]byte(dir + sub)); err != nil {
nerr++
}
}
fs.Debugf(dir, "%d hashes purged, %d failed", len(items)-nerr, nerr)
return nil
}
// kvMove: assign hashes to new path
type kvMove struct {
src string
dst string
dir bool
fs *Fs
}
func (op *kvMove) Do(ctx context.Context, b kv.Bucket) error {
src, dst := op.src, op.dst
if !op.dir {
err := moveHash(b, src, dst)
fs.Debugf(op.fs, "moving cached hash %s to %s (err: %v)", src, dst, err)
return err
}
if !strings.HasSuffix(src, "/") {
src += "/"
}
if !strings.HasSuffix(dst, "/") {
dst += "/"
}
var items []string
cur := b.Cursor()
bkey, _ := cur.Seek([]byte(src))
for bkey != nil {
key := string(bkey)
if !strings.HasPrefix(key, src) {
break
}
items = append(items, key[len(src):])
bkey, _ = cur.Next()
}
nerr := 0
for _, suffix := range items {
srcKey, dstKey := src+suffix, dst+suffix
err := moveHash(b, srcKey, dstKey)
fs.Debugf(op.fs, "Rename cache record %s -> %s (err: %v)", srcKey, dstKey, err)
if err != nil {
nerr++
}
}
fs.Debugf(op.fs, "%d hashes moved, %d failed", len(items)-nerr, nerr)
return nil
}
func moveHash(b kv.Bucket, src, dst string) error {
data := b.Get([]byte(src))
err := b.Delete([]byte(src))
if err != nil || len(data) == 0 {
return err
}
return b.Put([]byte(dst), data)
}
// kvGet: get single hash from database
type kvGet struct {
key string
fp string
hash string
val string
age time.Duration
}
func (op *kvGet) Do(ctx context.Context, b kv.Bucket) error {
data := b.Get([]byte(op.key))
if len(data) == 0 {
return errors.New("no record")
}
var r hashRecord
if err := r.decode(op.key, data); err != nil {
return errors.New("invalid record")
}
if !(r.Fp == anyFingerprint || op.fp == anyFingerprint || r.Fp == op.fp) {
return errors.New("fingerprint changed")
}
if time.Since(r.Created) > op.age {
return errors.New("record timed out")
}
if r.Hashes != nil {
op.val = r.Hashes[op.hash]
}
return nil
}
// kvPut: set hashes for an object by key
type kvPut struct {
key string
fp string
hashes operations.HashSums
age time.Duration
}
func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
data := b.Get([]byte(op.key))
var r hashRecord
if len(data) > 0 {
err = r.decode(op.key, data)
if err != nil || r.Fp != op.fp || time.Since(r.Created) > op.age {
r.Hashes = nil
}
}
if len(r.Hashes) == 0 {
r.Created = time.Now()
r.Hashes = operations.HashSums{}
r.Fp = op.fp
}
for hashType, hashVal := range op.hashes {
r.Hashes[hashType] = hashVal
}
if data, err = r.encode(op.key); err != nil {
return fmt.Errorf("marshal failed: %w", err)
}
if err = b.Put([]byte(op.key), data); err != nil {
return fmt.Errorf("put failed: %w", err)
}
return err
}
// kvDump: dump the database.
// Note: long dump can cause concurrent operations to fail.
type kvDump struct {
full bool
root string
path string
fs *Fs
num int
total int
}
func (op *kvDump) Do(ctx context.Context, b kv.Bucket) error {
f, baseRoot, dbPath := op.fs, op.root, op.path
if op.full {
total := 0
num := 0
_ = b.ForEach(func(bkey, data []byte) error {
total++
key := string(bkey)
include := (baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/"))
var r hashRecord
if err := r.decode(key, data); err != nil {
fs.Errorf(nil, "%s: invalid record: %v", key, err)
return nil
}
fmt.Println(f.dumpLine(&r, key, include, nil))
if include {
num++
}
return nil
})
fs.Infof(dbPath, "%d records out of %d", num, total)
op.num, op.total = num, total // for unit tests
return nil
}
num := 0
cur := b.Cursor()
var bkey, data []byte
if baseRoot != "" {
bkey, data = cur.Seek([]byte(baseRoot))
} else {
bkey, data = cur.First()
}
for bkey != nil {
key := string(bkey)
if !(baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/")) {
break
}
var r hashRecord
if err := r.decode(key, data); err != nil {
fs.Errorf(nil, "%s: invalid record: %v", key, err)
continue
}
if key = strings.TrimPrefix(key[len(baseRoot):], "/"); key == "" {
key = "/"
}
fmt.Println(f.dumpLine(&r, key, true, nil))
num++
bkey, data = cur.Next()
}
fs.Infof(dbPath, "%d records", num)
op.num = num // for unit tests
return nil
}
func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) string {
var status string
switch {
case !include:
status = "ext"
case err != nil:
status = "bad"
case r.Fp == anyFingerprint:
status = "stk"
default:
status = "ok "
}
var hashes []string
for _, hashType := range f.keepHashes.Array() {
hashName := hashType.String()
hashVal := r.Hashes[hashName]
if hashVal == "" || err != nil {
hashVal = "-"
}
hashVal = fmt.Sprintf("%-*s", hash.Width(hashType, false), hashVal)
hashes = append(hashes, hashName+":"+hashVal)
}
hashesStr := strings.Join(hashes, " ")
age := time.Since(r.Created).Round(time.Second)
if age > 24*time.Hour {
age = age.Round(time.Hour)
}
if err != nil {
age = 0
}
ageStr := age.String()
if strings.HasSuffix(ageStr, "h0m0s") {
ageStr = strings.TrimSuffix(ageStr, "0m0s")
}
return fmt.Sprintf("%s %s %9s %s", status, hashesStr, ageStr, path)
}
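The records above round-trip through encoding/gob; a self-contained sketch of the same encode/decode cycle (the field names mirror hashRecord, the values are made up):

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"time"
)

type record struct {
	Fp      string            // fingerprint, e.g. "size,modtime,hash"
	Hashes  map[string]string // hash name -> value
	Created time.Time
}

func main() {
	in := record{
		Fp:      "12,2021-11-15T18:03:13.000000000+0000,-",
		Hashes:  map[string]string{"md5": "d41d8cd98f00b204e9800998ecf8427e"},
		Created: time.Now(),
	}
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(&in); err != nil {
		panic(err)
	}
	var out record
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Fp, out.Hashes["md5"])
}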

backend/hasher/object.go (new file, 305 lines)

@@ -0,0 +1,305 @@
package hasher
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"path"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
)
// obtain hash for an object
func (o *Object) getHash(ctx context.Context, hashType hash.Type) (string, error) {
maxAge := time.Duration(o.f.opt.MaxAge)
if maxAge <= 0 {
return "", nil
}
fp := o.fingerprint(ctx)
if fp == "" {
return "", errors.New("fingerprint failed")
}
return o.f.getRawHash(ctx, hashType, o.Remote(), fp, maxAge)
}
// obtain hash for a path
func (f *Fs) getRawHash(ctx context.Context, hashType hash.Type, remote, fp string, age time.Duration) (string, error) {
key := path.Join(f.Fs.Root(), remote)
op := &kvGet{
key: key,
fp: fp,
hash: hashType.String(),
age: age,
}
err := f.db.Do(false, op)
return op.val, err
}
// put new hashes for an object
func (o *Object) putHashes(ctx context.Context, rawHashes hashMap) error {
if o.f.opt.MaxAge <= 0 {
return nil
}
fp := o.fingerprint(ctx)
if fp == "" {
return nil
}
key := path.Join(o.f.Fs.Root(), o.Remote())
hashes := operations.HashSums{}
for hashType, hashVal := range rawHashes {
hashes[hashType.String()] = hashVal
}
return o.f.putRawHashes(ctx, key, fp, hashes)
}
// set hashes for a path without any validation
func (f *Fs) putRawHashes(ctx context.Context, key, fp string, hashes operations.HashSums) error {
return f.db.Do(true, &kvPut{
key: key,
fp: fp,
hashes: hashes,
age: time.Duration(f.opt.MaxAge),
})
}
// Hash returns the selected checksum of the file or "" if unavailable.
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string, err error) {
f := o.f
if f.passHashes.Contains(hashType) {
fs.Debugf(o, "pass %s", hashType)
return o.Object.Hash(ctx, hashType)
}
if !f.suppHashes.Contains(hashType) {
fs.Debugf(o, "unsupp %s", hashType)
return "", hash.ErrUnsupported
}
if hashVal, err = o.getHash(ctx, hashType); err != nil {
fs.Debugf(o, "getHash: %v", err)
err = nil
hashVal = ""
}
if hashVal != "" {
fs.Debugf(o, "cached %s = %q", hashType, hashVal)
return hashVal, nil
}
if f.slowHashes.Contains(hashType) {
fs.Debugf(o, "slow %s", hashType)
hashVal, err = o.Object.Hash(ctx, hashType)
if err == nil && hashVal != "" && f.keepHashes.Contains(hashType) {
if err = o.putHashes(ctx, hashMap{hashType: hashVal}); err != nil {
fs.Debugf(o, "putHashes: %v", err)
err = nil
}
}
return hashVal, err
}
if f.autoHashes.Contains(hashType) && o.Size() < int64(f.opt.AutoSize) {
_ = o.updateHashes(ctx)
if hashVal, err = o.getHash(ctx, hashType); err != nil {
fs.Debugf(o, "auto %s = %q (%v)", hashType, hashVal, err)
err = nil
}
}
return hashVal, err
}
// updateHashes performs implicit "rclone hashsum --download" and updates cache.
func (o *Object) updateHashes(ctx context.Context) error {
r, err := o.Open(ctx)
if err != nil {
fs.Infof(o, "update failed (open): %v", err)
return err
}
defer func() {
_ = r.Close()
}()
if _, err = io.Copy(ioutil.Discard, r); err != nil {
fs.Infof(o, "update failed (copy): %v", err)
return err
}
return nil
}
// Update the object with the given data, time and size.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
_ = o.f.pruneHash(src.Remote())
return o.Object.Update(ctx, in, src, options...)
}
// Remove an object.
func (o *Object) Remove(ctx context.Context) error {
_ = o.f.pruneHash(o.Remote())
return o.Object.Remove(ctx)
}
// SetModTime sets the modification time of the file.
// Also prunes the cache entry when modtime changes so that
// touching a file will trigger checksum recalculation even
// on backends that don't provide modTime with fingerprint.
func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
if mtime != o.Object.ModTime(ctx) {
_ = o.f.pruneHash(o.Remote())
}
return o.Object.SetModTime(ctx, mtime)
}
// Open opens the file for read.
// Full reads will also update object hashes.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadCloser, err error) {
size := o.Size()
var offset, limit int64 = 0, -1
for _, option := range options {
switch opt := option.(type) {
case *fs.SeekOption:
offset = opt.Offset
case *fs.RangeOption:
offset, limit = opt.Decode(size)
}
}
if offset < 0 {
return nil, errors.New("invalid offset")
}
if limit < 0 {
limit = size - offset
}
if r, err = o.Object.Open(ctx, options...); err != nil {
return nil, err
}
if offset != 0 || limit < size {
// It's a partial read
return r, err
}
return o.f.newHashingReader(ctx, r, func(sums hashMap) {
if err := o.putHashes(ctx, sums); err != nil {
fs.Infof(o, "auto hashing error: %v", err)
}
})
}
// Put data into the remote path with given modTime and size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
var (
o *Object
common hash.Set
rehash bool
hashes hashMap
)
if fsrc := src.Fs(); fsrc != nil {
common = fsrc.Hashes().Overlap(f.keepHashes)
// Rehash if source does not have all required hashes or hashing is slow
rehash = fsrc.Features().SlowHash || common != f.keepHashes
}
wrapIn := in
if rehash {
r, err := f.newHashingReader(ctx, in, func(sums hashMap) {
hashes = sums
})
fs.Debugf(src, "Rehash in-fly due to incomplete or slow source set %v (err: %v)", common, err)
if err == nil {
wrapIn = r
} else {
rehash = false
}
}
_ = f.pruneHash(src.Remote())
oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
o = f.wrapObject(oResult, err)
if o == nil {
return nil, err
}
if !rehash {
hashes = hashMap{}
for _, ht := range common.Array() {
if h, e := src.Hash(ctx, ht); e == nil && h != "" {
hashes[ht] = h
}
}
}
if len(hashes) > 0 {
err := o.putHashes(ctx, hashes)
fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
}
return o, err
}
type hashingReader struct {
rd io.Reader
hasher *hash.MultiHasher
fun func(hashMap)
}
func (f *Fs) newHashingReader(ctx context.Context, rd io.Reader, fun func(hashMap)) (*hashingReader, error) {
hasher, err := hash.NewMultiHasherTypes(f.keepHashes)
if err != nil {
return nil, err
}
hr := &hashingReader{
rd: rd,
hasher: hasher,
fun: fun,
}
return hr, nil
}
func (r *hashingReader) Read(p []byte) (n int, err error) {
n, err = r.rd.Read(p)
if err != nil && err != io.EOF {
r.hasher = nil
}
if r.hasher != nil {
if _, errHash := r.hasher.Write(p[:n]); errHash != nil {
r.hasher = nil
err = errHash
}
}
if err == io.EOF && r.hasher != nil {
r.fun(r.hasher.Sums())
r.hasher = nil
}
return
}
func (r *hashingReader) Close() error {
if rc, ok := r.rd.(io.ReadCloser); ok {
return rc.Close()
}
return nil
}
// Return object fingerprint or empty string in case of errors
//
// Note that we can't use the generic `fs.Fingerprint` here because
// this fingerprint is used to pick _derived hashes_ that are slow
// to calculate or completely unsupported by the base remote.
//
// The hasher fingerprint must be based on `fsHash`, the first _fast_
// hash supported _by the underlying remote_ (if there is one),
// while `fs.Fingerprint` would select a hash _produced by hasher_
// creating unresolvable fingerprint loop.
func (o *Object) fingerprint(ctx context.Context) string {
size := o.Object.Size()
timeStr := "-"
if o.f.fpTime {
timeStr = o.Object.ModTime(ctx).UTC().Format(timeFormat)
if timeStr == "" {
return ""
}
}
hashStr := "-"
if o.f.fpHash != hash.None {
var err error
hashStr, err = o.Object.Hash(ctx, o.f.fpHash)
if hashStr == "" || err != nil {
return ""
}
}
return fmt.Sprintf("%d,%s,%s", size, timeStr, hashStr)
}
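Open and Put both rely on hashing the stream as it passes through; a stripped-down standalone version of the hashingReader idea with a single stdlib hash (as above, the callback fires exactly once, at EOF):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"strings"
)

type hashOnRead struct {
	rd  io.Reader
	h   hash.Hash
	fun func(sum string)
}

func (r *hashOnRead) Read(p []byte) (int, error) {
	n, err := r.rd.Read(p)
	if n > 0 {
		_, _ = r.h.Write(p[:n]) // hash.Hash writes never fail
	}
	if err == io.EOF && r.fun != nil {
		r.fun(hex.EncodeToString(r.h.Sum(nil)))
		r.fun = nil // report only once
	}
	return n, err
}

func main() {
	r := &hashOnRead{
		rd:  strings.NewReader("doggy froggy"),
		h:   md5.New(),
		fun: func(sum string) { fmt.Println("md5:", sum) },
	}
	_, _ = io.Copy(ioutil.Discard, r)
}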


@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9 // +build !plan9
package hdfs package hdfs
@@ -262,6 +263,98 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.client.RemoveAll(realpath) return f.client.RemoveAll(realpath)
} }
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Get the real paths from the remote specs:
sourcePath := srcObj.fs.realpath(srcObj.remote)
targetPath := f.realpath(remote)
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
// Make sure the target folder exists:
dirname := path.Dir(targetPath)
err := f.client.MkdirAll(dirname, 0755)
if err != nil {
return nil, err
}
// Do the move
// Note that the underlying HDFS library hard-codes Overwrite=True, but this is expected rclone behaviour.
err = f.client.Rename(sourcePath, targetPath)
if err != nil {
return nil, err
}
// Look up the resulting object
info, err := f.client.Stat(targetPath)
if err != nil {
return nil, err
}
// And return it:
return &Object{
fs: f,
remote: remote,
size: info.Size(),
modTime: info.ModTime(),
}, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
srcFs, ok := src.(*Fs)
if !ok {
return fs.ErrorCantDirMove
}
// Get the real paths from the remote specs:
sourcePath := srcFs.realpath(srcRemote)
targetPath := f.realpath(dstRemote)
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
// Check if the destination exists:
info, err := f.client.Stat(targetPath)
if err == nil {
fs.Debugf(f, "target directory already exits, IsDir = [%t]", info.IsDir())
return fs.ErrorDirExists
}
// Make sure the target's parent folder exists:
dirname := path.Dir(targetPath)
err = f.client.MkdirAll(dirname, 0755)
if err != nil {
return err
}
// Do the move
err = f.client.Rename(sourcePath, targetPath)
if err != nil {
return err
}
return nil
}
// About gets quota information from the Fs // About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := f.client.StatFs() info, err := f.client.StatFs()
@@ -317,4 +410,6 @@ var (
_ fs.Purger = (*Fs)(nil) _ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil) _ fs.PutStreamer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
) )
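With Mover and DirMover implemented, renames on HDFS are now performed server-side instead of falling back to copy-and-delete, so e.g. (paths illustrative) rclone moveto hdfs:old/report.txt hdfs:new/report.txt becomes essentially a single Rename call on the namenode, plus a MkdirAll for the parent as in Move above.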


@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9 // +build !plan9
package hdfs package hdfs
@@ -18,35 +19,28 @@ func init() {
	NewFs: NewFs,
	Options: []fs.Option{{
		Name: "namenode",
-		Help: "hadoop name node and port",
+		Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
		Required: true,
-		Examples: []fs.OptionExample{{
-			Value: "namenode:8020",
-			Help:  "Connect to host namenode at port 8020",
-		}},
	}, {
		Name: "username",
-		Help: "hadoop user name",
+		Help: "Hadoop user name.",
		Required: false,
		Examples: []fs.OptionExample{{
			Value: "root",
-			Help:  "Connect to hdfs as root",
+			Help:  "Connect to hdfs as root.",
		}},
	}, {
		Name: "service_principal_name",
-		Help: `Kerberos service principal name for the namenode
+		Help: `Kerberos service principal name for the namenode.

Enables KERBEROS authentication. Specifies the Service Principal Name
-(<SERVICE>/<FQDN>) for the namenode.`,
+(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
+for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
		Required: false,
-		Examples: []fs.OptionExample{{
-			Value: "hdfs/namenode.hadoop.docker",
-			Help:  "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
-		}},
		Advanced: true,
	}, {
		Name: "data_transfer_protection",
-		Help: `Kerberos data transfer protection: authentication|integrity|privacy
+		Help: `Kerberos data transfer protection: authentication|integrity|privacy.

Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with the


@@ -1,5 +1,6 @@
// Test HDFS filesystem interface
+//go:build !plan9
// +build !plan9

package hdfs_test


@@ -1,6 +1,7 @@
// Build for hdfs for unsupported platforms to stop go complaining
// about "no buildable Go source files "
+//go:build plan9
// +build plan9

package hdfs


@@ -1,3 +1,4 @@
+//go:build !plan9
// +build !plan9

package hdfs


@@ -6,6 +6,8 @@ package http
import (
	"context"
+	"errors"
+	"fmt"
	"io"
	"mime"
	"net/http"
@@ -16,7 +18,6 @@ import (
	"sync"
	"time"

-	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
@@ -38,25 +39,18 @@ func init() {
	NewFs: NewFs,
	Options: []fs.Option{{
		Name: "url",
-		Help: "URL of http host to connect to",
+		Help: "URL of http host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
		Required: true,
-		Examples: []fs.OptionExample{{
-			Value: "https://example.com",
-			Help:  "Connect to example.com",
-		}, {
-			Value: "https://user:pass@example.com",
-			Help:  "Connect to example.com using a username and password",
-		}},
	}, {
		Name: "headers",
-		Help: `Set HTTP headers for all transactions
+		Help: `Set HTTP headers for all transactions.

-Use this to set additional HTTP headers for all transactions
+Use this to set additional HTTP headers for all transactions.

The input format is comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.

-For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
+For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
@@ -64,7 +58,7 @@ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'
		Advanced: true,
	}, {
		Name: "no_slash",
-		Help: `Set this if the site doesn't end directories with /
+		Help: `Set this if the site doesn't end directories with /.

Use this if your target website does not use / on the end of
directories.
@@ -80,7 +74,7 @@ directories.`,
		Advanced: true,
	}, {
		Name: "no_head",
-		Help: `Don't use HEAD requests to find file sizes in dir listing
+		Help: `Don't use HEAD requests to find file sizes in dir listing.

If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
@@ -139,7 +133,7 @@ func statusError(res *http.Response, err error) error {
	}
	if res.StatusCode < 200 || res.StatusCode > 299 {
		_ = res.Body.Close()
-		return errors.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
+		return fmt.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
	}
	return nil
}
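The pattern throughout these changes is mechanical: errors.Wrap(err, "msg") from github.com/pkg/errors becomes fmt.Errorf("msg: %w", err) with the standard library. The %w verb keeps the error chain intact, so sentinel errors can still be matched through the wrapper; a self-contained illustration (the message and sentinel are made up):

package main

import (
	"errors"
	"fmt"
)

// stand-in for a sentinel such as fs.ErrorObjectNotFound
var errNotFound = errors.New("not found")

func main() {
	// %w wraps: the sentinel stays reachable via errors.Is / errors.As.
	wrapped := fmt.Errorf("failed to readDir: %w", errNotFound)
	fmt.Println(errors.Is(wrapped, errNotFound)) // true

	// %v flattens: the chain is broken and errors.Is no longer matches.
	flattened := fmt.Errorf("failed to readDir: %v", errNotFound)
	fmt.Println(errors.Is(flattened, errNotFound)) // false
}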
@@ -384,15 +378,15 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
	URL := f.url(dir)
	u, err := url.Parse(URL)
	if err != nil {
-		return nil, errors.Wrap(err, "failed to readDir")
+		return nil, fmt.Errorf("failed to readDir: %w", err)
	}
	if !strings.HasSuffix(URL, "/") {
-		return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
+		return nil, fmt.Errorf("internal error: readDir URL %q didn't end in /", URL)
	}
	// Do the request
	req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
	if err != nil {
-		return nil, errors.Wrap(err, "readDir failed")
+		return nil, fmt.Errorf("readDir failed: %w", err)
	}
	f.addHeaders(req)
	res, err := f.httpClient.Do(req)
@@ -404,7 +398,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
	}
	err = statusError(res, err)
	if err != nil {
-		return nil, errors.Wrap(err, "failed to readDir")
+		return nil, fmt.Errorf("failed to readDir: %w", err)
	}

	contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
@@ -412,10 +406,10 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
	case "text/html":
		names, err = parse(u, res.Body)
		if err != nil {
-			return nil, errors.Wrap(err, "readDir")
+			return nil, fmt.Errorf("readDir: %w", err)
		}
	default:
-		return nil, errors.Errorf("Can't parse content type %q", contentType)
+		return nil, fmt.Errorf("Can't parse content type %q", contentType)
	}
	return names, nil
}
@@ -435,7 +429,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	}
	names, err := f.readDir(ctx, dir)
	if err != nil {
-		return nil, errors.Wrapf(err, "error listing %q", dir)
+		return nil, fmt.Errorf("error listing %q: %w", dir, err)
	}
	var (
		entriesMu sync.Mutex // to protect entries
@@ -547,7 +541,7 @@ func (o *Object) stat(ctx context.Context) error {
	url := o.url()
	req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
	if err != nil {
-		return errors.Wrap(err, "stat failed")
+		return fmt.Errorf("stat failed: %w", err)
	}
	o.fs.addHeaders(req)
	res, err := o.fs.httpClient.Do(req)
@@ -556,7 +550,7 @@ func (o *Object) stat(ctx context.Context) error {
	}
	err = statusError(res, err)
	if err != nil {
-		return errors.Wrap(err, "failed to stat")
+		return fmt.Errorf("failed to stat: %w", err)
	}
	t, err := http.ParseTime(res.Header.Get("Last-Modified"))
	if err != nil {
@@ -569,7 +563,7 @@ func (o *Object) stat(ctx context.Context) error {
	if o.fs.opt.NoSlash {
		mediaType, _, err := mime.ParseMediaType(o.contentType)
		if err != nil {
-			return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
+			return fmt.Errorf("failed to parse Content-Type: %q: %w", o.contentType, err)
		}
		if mediaType == "text/html" {
			return fs.ErrorNotAFile
@@ -595,7 +589,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	url := o.url()
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
-		return nil, errors.Wrap(err, "Open failed")
+		return nil, fmt.Errorf("Open failed: %w", err)
	}

	// Add optional headers
@@ -608,7 +602,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	res, err := o.fs.httpClient.Do(req)
	err = statusError(res, err)
	if err != nil {
-		return nil, errors.Wrap(err, "Open failed")
+		return nil, fmt.Errorf("Open failed: %w", err)
	}
	return res.Body, nil
}


@@ -9,6 +9,7 @@ package hubic
import (
	"context"
	"encoding/json"
+	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
@@ -16,7 +17,6 @@ import (
	"time"

	swiftLib "github.com/ncw/swift/v2"
-	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/swift"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
@@ -120,7 +120,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		body, _ := ioutil.ReadAll(resp.Body)
		bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
-		return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
+		return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
	}
	decoder := json.NewDecoder(resp.Body)
	var result credentials
@@ -146,7 +146,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
	if err != nil {
-		return nil, errors.Wrap(err, "failed to configure Hubic")
+		return nil, fmt.Errorf("failed to configure Hubic: %w", err)
	}
	f := &Fs{
@@ -163,7 +163,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	}
	err = c.Authenticate(ctx)
	if err != nil {
-		return nil, errors.Wrap(err, "error authenticating swift connection")
+		return nil, fmt.Errorf("error authenticating swift connection: %w", err)
	}
	// Parse config into swift.Options struct


@@ -2,10 +2,9 @@ package api
import (
	"encoding/xml"
+	"errors"
	"fmt"
	"time"
-
-	"github.com/pkg/errors"
)

const (
@@ -368,6 +367,7 @@ type JottaFile struct {
	XMLName         xml.Name
	Name            string `xml:"name,attr"`
	Deleted         Flag   `xml:"deleted,attr"`
+	PublicURI       string `xml:"publicURI"`
	PublicSharePath string `xml:"publicSharePath"`
	State           string `xml:"currentRevision>state"`
	CreatedAt       Time   `xml:"currentRevision>created"`


@@ -7,6 +7,7 @@ import (
"encoding/base64" "encoding/base64"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@@ -19,7 +20,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/jottacloud/api" "github.com/rclone/rclone/backend/jottacloud/api"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
@@ -69,6 +69,10 @@ const (
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token" teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth" teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaCloudClientID = "desktop" teliaCloudClientID = "desktop"
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
tele2CloudClientID = "desktop"
) )
// Register with Fs // Register with Fs
@@ -86,7 +90,7 @@ func init() {
	Advanced: true,
}, {
	Name: "trashed_only",
-	Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
+	Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
	Default:  false,
	Advanced: true,
}, {
@@ -99,6 +103,11 @@ func init() {
	Help:     "Files bigger than this can be resumed if the upload fails.",
	Default:  fs.SizeSuffix(10 * 1024 * 1024),
	Advanced: true,
+}, {
+	Name:     "no_versions",
+	Help:     "Avoid server side versioning by deleting files and recreating files instead of overwriting them.",
+	Default:  false,
+	Advanced: true,
}, {
	Name: config.ConfigEncoding,
	Help: config.ConfigEncodingHelp,
@@ -117,15 +126,18 @@ func init() {
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
	switch config.State {
	case "":
-		return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type`, []fs.OptionExample{{
+		return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type.`, []fs.OptionExample{{
			Value: "standard",
-			Help:  "Standard authentication - use this if you're a normal Jottacloud user.",
+			Help:  "Standard authentication.\nUse this if you're a normal Jottacloud user.",
		}, {
			Value: "legacy",
-			Help:  "Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
+			Help:  "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
		}, {
			Value: "telia",
-			Help:  "Telia Cloud authentication - use this if you are using Telia Cloud.",
+			Help:  "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
+		}, {
+			Value: "tele2",
+			Help:  "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
		}})
	case "auth_type_done":
		// Jump to next state according to config chosen
@@ -141,12 +153,12 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
	srv := rest.NewClient(fshttp.NewClient(ctx))
	token, tokenEndpoint, err := doTokenAuth(ctx, srv, loginToken)
	if err != nil {
-		return nil, errors.Wrap(err, "failed to get oauth token")
+		return nil, fmt.Errorf("failed to get oauth token: %w", err)
	}
	m.Set(configTokenURL, tokenEndpoint)
	err = oauthutil.PutToken(name, m, &token, true)
	if err != nil {
-		return nil, errors.Wrap(err, "error while saving token")
+		return nil, fmt.Errorf("error while saving token: %w", err)
	}
	return fs.ConfigGoto("choose_device")
case "legacy": // configure a jottacloud backend using legacy authentication
@@ -163,7 +175,7 @@ machines.`)
	if config.Result == "true" {
		deviceRegistration, err := registerDevice(ctx, srv)
		if err != nil {
-			return nil, errors.Wrap(err, "failed to register device")
+			return nil, fmt.Errorf("failed to register device: %w", err)
		}
		m.Set(configClientID, deviceRegistration.ClientID)
		m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
@@ -211,11 +223,11 @@ machines.`)
	m.Set("password", "")
	m.Set("auth_code", "")
	if err != nil {
-		return nil, errors.Wrap(err, "failed to get oauth token")
+		return nil, fmt.Errorf("failed to get oauth token: %w", err)
	}
	err = oauthutil.PutToken(name, m, &token, true)
	if err != nil {
-		return nil, errors.Wrap(err, "error while saving token")
+		return nil, fmt.Errorf("error while saving token: %w", err)
	}
	return fs.ConfigGoto("choose_device")
case "telia": // telia cloud config
@@ -233,6 +245,21 @@ machines.`)
			RedirectURL: oauthutil.RedirectLocalhostURL,
		},
	})
+case "tele2": // tele2 cloud config
+	m.Set("configVersion", fmt.Sprint(configVersion))
+	m.Set(configClientID, tele2CloudClientID)
+	m.Set(configTokenURL, tele2CloudTokenURL)
+	return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
+		OAuth2Config: &oauth2.Config{
+			Endpoint: oauth2.Endpoint{
+				AuthURL:  tele2CloudAuthURL,
+				TokenURL: tele2CloudTokenURL,
+			},
+			ClientID:    tele2CloudClientID,
+			Scopes:      []string{"openid", "jotta-default", "offline_access"},
+			RedirectURL: oauthutil.RedirectLocalhostURL,
+		},
+	})
case "choose_device":
	return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
case "choose_device_query":
@@ -297,6 +324,7 @@ type Options struct {
	MD5MemoryThreshold fs.SizeSuffix        `config:"md5_memory_limit"`
	TrashedOnly        bool                 `config:"trashed_only"`
	HardDelete         bool                 `config:"hard_delete"`
+	NoVersions         bool                 `config:"no_versions"`
	UploadThreshold    fs.SizeSuffix        `config:"upload_resume_limit"`
	Enc                encoder.MultiEncoder `config:"encoding"`
}
@@ -523,7 +551,7 @@ func getCustomerInfo(ctx context.Context, apiSrv *rest.Client) (info *api.Custom
_, err = apiSrv.CallJSON(ctx, &opts, nil, &info) _, err = apiSrv.CallJSON(ctx, &opts, nil, &info)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't get customer info") return nil, fmt.Errorf("couldn't get customer info: %w", err)
} }
return info, nil return info, nil
@@ -538,7 +566,7 @@ func getDriveInfo(ctx context.Context, srv *rest.Client, username string) (info
_, err = srv.CallXML(ctx, &opts, nil, &info) _, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't get drive info") return nil, fmt.Errorf("couldn't get drive info: %w", err)
} }
return info, nil return info, nil
@@ -553,7 +581,7 @@ func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *ap
_, err = srv.CallXML(ctx, &opts, nil, &info) _, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't get device info") return nil, fmt.Errorf("couldn't get device info: %w", err)
} }
return info, nil return info, nil
@@ -591,9 +619,11 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
} }
if err != nil { if err != nil {
return nil, errors.Wrap(err, "read metadata failed") return nil, fmt.Errorf("read metadata failed: %w", err)
} }
if result.XMLName.Local != "file" { if result.XMLName.Local == "folder" {
return nil, fs.ErrorIsDir
} else if result.XMLName.Local != "file" {
return nil, fs.ErrorNotAFile return nil, fs.ErrorNotAFile
} }
return &result, nil return &result, nil
@@ -712,7 +742,7 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
// Create OAuth Client // Create OAuth Client
oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient) oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil { if err != nil {
return nil, nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client") return nil, nil, fmt.Errorf("Failed to configure Jottacloud oauth client: %w", err)
} }
return oAuthClient, ts, nil return oAuthClient, ts, nil
} }
@@ -756,7 +786,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Renew the token in the background // Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "") _, err := f.readMetaDataForPath(ctx, "")
if err == fs.ErrorNotAFile { if err == fs.ErrorNotAFile || err == fs.ErrorIsDir {
err = nil err = nil
} }
return err return err
@@ -778,7 +808,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} }
_, err := f.NewObject(context.TODO(), remote) _, err := f.NewObject(context.TODO(), remote)
if err != nil { if err != nil {
if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile { if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorNotAFile) || errors.Is(err, fs.ErrorIsDir) {
// File doesn't exist so return old f // File doesn't exist so return old f
f.root = root f.root = root
return f, nil return f, nil
@@ -801,8 +831,10 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Jot
} }
var err error var err error
if info != nil { if info != nil {
// Set info if !f.validFile(info) {
err = o.setMetaData(info) return nil, fs.ErrorObjectNotFound
}
err = o.setMetaData(info) // sets the info
} else { } else {
err = o.readMetaData(ctx, false) // reads info and meta, returning an error err = o.readMetaData(ctx, false) // reads info and meta, returning an error
} }
@@ -871,40 +903,30 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
			return nil, fs.ErrorDirNotFound
		}
	}
-	return nil, errors.Wrap(err, "couldn't list files")
+	return nil, fmt.Errorf("couldn't list files: %w", err)
}
-if bool(result.Deleted) && !f.opt.TrashedOnly {
+if !f.validFolder(&result) {
	return nil, fs.ErrorDirNotFound
}

for i := range result.Folders {
	item := &result.Folders[i]
-	if !f.opt.TrashedOnly && bool(item.Deleted) {
-		continue
+	if f.validFolder(item) {
+		remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
+		d := fs.NewDir(remote, time.Time(item.ModifiedAt))
+		entries = append(entries, d)
	}
-	remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
-	d := fs.NewDir(remote, time.Time(item.ModifiedAt))
-	entries = append(entries, d)
}

for i := range result.Files {
	item := &result.Files[i]
-	if f.opt.TrashedOnly {
-		if !item.Deleted || item.State != "COMPLETED" {
-			continue
-		}
-	} else {
-		if item.Deleted || item.State != "COMPLETED" {
-			continue
-		}
+	if f.validFile(item) {
+		remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
+		if o, err := f.newObjectWithInfo(ctx, remote, item); err == nil {
+			entries = append(entries, o)
+		}
	}
-	remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
-	o, err := f.newObjectWithInfo(ctx, remote, item)
-	if err != nil {
-		continue
-	}
-	entries = append(entries, o)
}
return entries, nil
}
@@ -921,7 +943,7 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
	startPathLength := len(startPath)
	for i := range startFolder.Folders {
		folder := &startFolder.Folders[i]
-		if folder.Deleted {
+		if !f.validFolder(folder) {
			return nil
		}
		folderPath := f.opt.Enc.ToStandardPath(path.Join(folder.Path, folder.Name))
@@ -939,17 +961,16 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
		}
		for i := range folder.Files {
			file := &folder.Files[i]
-			if file.Deleted || file.State != "COMPLETED" {
-				continue
-			}
-			remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
-			o, err := f.newObjectWithInfo(ctx, remoteFile, file)
-			if err != nil {
-				return err
-			}
-			err = fn(o)
-			if err != nil {
-				return err
+			if f.validFile(file) {
+				remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
+				o, err := f.newObjectWithInfo(ctx, remoteFile, file)
+				if err != nil {
+					return err
+				}
+				err = fn(o)
+				if err != nil {
+					return err
+				}
			}
		}
	}
@@ -982,7 +1003,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
		return fs.ErrorDirNotFound
	}
}
-return errors.Wrap(err, "couldn't list files")
+return fmt.Errorf("couldn't list files: %w", err)
}
list := walk.NewListRHelper(callback)
err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
@@ -1082,7 +1103,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
-		return errors.Wrap(err, "couldn't purge directory")
+		return fmt.Errorf("couldn't purge directory: %w", err)
	}
	return nil
@@ -1149,7 +1170,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
	if err != nil {
-		return nil, errors.Wrap(err, "couldn't copy file")
+		return nil, fmt.Errorf("couldn't copy file: %w", err)
	}
	return f.newObjectWithInfo(ctx, remote, info)
@@ -1179,7 +1200,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)
	if err != nil {
-		return nil, errors.Wrap(err, "couldn't move file")
+		return nil, fmt.Errorf("couldn't move file: %w", err)
	}
	return f.newObjectWithInfo(ctx, remote, info)
@@ -1223,7 +1244,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
	_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
	if err != nil {
-		return errors.Wrap(err, "couldn't move directory")
+		return fmt.Errorf("couldn't move directory: %w", err)
	}
	return nil
}
@@ -1257,20 +1278,28 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
}
if err != nil {
	if unlink {
-		return "", errors.Wrap(err, "couldn't remove public link")
+		return "", fmt.Errorf("couldn't remove public link: %w", err)
	}
-	return "", errors.Wrap(err, "couldn't create public link")
+	return "", fmt.Errorf("couldn't create public link: %w", err)
}
if unlink {
-	if result.PublicSharePath != "" {
-		return "", errors.Errorf("couldn't remove public link - %q", result.PublicSharePath)
+	if result.PublicURI != "" {
+		return "", fmt.Errorf("couldn't remove public link - %q", result.PublicURI)
	}
	return "", nil
}
-if result.PublicSharePath == "" {
-	return "", errors.New("couldn't create public link - no link path received")
+if result.PublicURI == "" {
+	return "", errors.New("couldn't create public link - no uri received")
}
-return joinPath(baseURL, result.PublicSharePath), nil
+if result.PublicSharePath != "" {
+	webLink := joinPath(baseURL, result.PublicSharePath)
+	fs.Debugf(nil, "Web link: %s", webLink)
+} else {
+	fs.Debugf(nil, "No web link received")
+}
+directLink := joinPath(baseURL, fmt.Sprintf("opin/io/downloadPublic/%s/%s", f.user, result.PublicURI))
+fs.Debugf(nil, "Direct link: %s", directLink)
+return directLink, nil
}
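PublicLink now returns the direct download URL built from the account name and the new publicURI field, and only logs the old web share path at debug level. A small sketch of the URL construction with made-up values (joinPath here is a stand-in for the backend's helper):

package main

import (
	"fmt"
	"net/url"
)

// joinPath stand-in: joins a base URL and a relative path.
func joinPath(base, rel string) string {
	u, err := url.Parse(base)
	if err != nil {
		return base + rel
	}
	r, err := url.Parse(rel)
	if err != nil {
		return base + rel
	}
	return u.ResolveReference(r).String()
}

func main() {
	// Hypothetical values: baseURL as configured in the backend,
	// user and publicURI as returned by the share API call above.
	baseURL := "https://www.jottacloud.com/"
	user, publicURI := "alice", "abc123"
	fmt.Println(joinPath(baseURL, fmt.Sprintf("opin/io/downloadPublic/%s/%s", user, publicURI)))
	// https://www.jottacloud.com/opin/io/downloadPublic/alice/abc123
}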
// About gets quota information // About gets quota information
@@ -1290,6 +1319,21 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	return usage, nil
}
// UserInfo fetches info about the current user
func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err error) {
cust, err := getCustomerInfo(ctx, f.apiSrv)
if err != nil {
return nil, err
}
return map[string]string{
"Username": cust.Username,
"Email": cust.Email,
"Name": cust.Name,
"AccountType": cust.AccountType,
"SubscriptionType": cust.SubscriptionType,
}, nil
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
	opts := rest.Opts{
@@ -1300,7 +1344,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
	var info api.TrashResponse
	_, err := f.apiSrv.CallJSON(ctx, &opts, nil, &info)
	if err != nil {
-		return errors.Wrap(err, "couldn't empty trash")
+		return fmt.Errorf("couldn't empty trash: %w", err)
	}
	return nil
@@ -1360,6 +1404,25 @@ func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}
// validFile checks if info indicates file is valid
func (f *Fs) validFile(info *api.JottaFile) bool {
if info.State != "COMPLETED" {
return false // File is incomplete or corrupt
}
if !info.Deleted {
return !f.opt.TrashedOnly // Regular file; return false if TrashedOnly, else true
}
return f.opt.TrashedOnly // Deleted file; return true if TrashedOnly, else false
}
// validFolder checks if info indicates folder is valid
func (f *Fs) validFolder(info *api.JottaFolder) bool {
// Returns true if folder is not deleted.
// If TrashedOnly option then always returns true, because a folder not
// in trash must be traversed to get to files/subfolders that are.
return !bool(info.Deleted) || f.opt.TrashedOnly
}
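These two helpers centralise the Deleted/State/TrashedOnly checks that were previously repeated in List, ListR and readMetaData. A hypothetical in-package sketch of how validFile answers for each combination (not part of the commit; it assumes package-internal access, the usual imports, and makes up the non-completed state value):

// would live in package jottacloud, next to the code above
func demoValidFile() {
	normal := &Fs{opt: Options{TrashedOnly: false}}
	trash := &Fs{opt: Options{TrashedOnly: true}}

	live := &api.JottaFile{State: "COMPLETED"}
	binned := &api.JottaFile{State: "COMPLETED", Deleted: true}
	partial := &api.JottaFile{State: "INCOMPLETE"} // hypothetical non-completed state

	fmt.Println(normal.validFile(live))    // true:  regular listings show live files
	fmt.Println(normal.validFile(binned))  // false: trashed files are hidden
	fmt.Println(trash.validFile(live))     // false: trashed_only hides live files
	fmt.Println(trash.validFile(binned))   // true:  trashed_only shows only the trash
	fmt.Println(normal.validFile(partial)) // false: incomplete uploads are never listed
}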
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.JottaFile) (err error) {
	o.hasMetaData = true
@@ -1379,7 +1442,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
	if err != nil {
		return err
	}
-	if bool(info.Deleted) && !o.fs.opt.TrashedOnly {
+	if !o.fs.validFile(info) {
		return fs.ErrorObjectNotFound
	}
	return o.setMetaData(info)
@@ -1400,7 +1463,50 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-	return fs.ErrorCantSetModTime
+	// make sure metadata is available, we need its current size and md5
err := o.readMetaData(ctx, false)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return err
}
// prepare allocate request with existing metadata but changed timestamps
var resp *http.Response
var options []fs.OpenOption
opts := rest.Opts{
Method: "POST",
Path: "files/v1/allocate",
Options: options,
ExtraHeaders: make(map[string]string),
}
fileDate := api.Time(modTime).APIString()
var request = api.AllocateFileRequest{
Bytes: o.size,
Created: fileDate,
Modified: fileDate,
Md5: o.md5,
Path: path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
}
// send it
var response api.AllocateFileResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
// check response
if response.State != "COMPLETED" {
// could be the file was modified (size/md5 changed) between readMetaData and the allocate request
return errors.New("metadata did not match")
}
// update local metadata
o.modTime = modTime
return nil
}

// Storable returns a boolean showing whether this object is storable
@@ -1494,6 +1600,20 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.fs.opt.NoVersions {
err := o.readMetaData(ctx, false)
if err == nil {
// if the object exists delete it
err = o.remove(ctx, true)
if err != nil {
return fmt.Errorf("failed to remove old object: %w", err)
}
}
// if the object does not exist we can just continue but if the error is something different we should report that
if err != fs.ErrorObjectNotFound {
return err
}
}
	o.fs.tokenRenewer.Start()
	defer o.fs.tokenRenewer.Stop()
	size := src.Size()
@@ -1507,7 +1627,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		md5String, in, cleanup, err = readMD5(in, size, int64(o.fs.opt.MD5MemoryThreshold))
		defer cleanup()
		if err != nil {
-			return errors.Wrap(err, "failed to calculate MD5")
+			return fmt.Errorf("failed to calculate MD5: %w", err)
		}
		// Wrap the accounting back onto the stream
		in = wrap(in)
@@ -1584,8 +1704,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	return nil
}

-// Remove an object
-func (o *Object) Remove(ctx context.Context) error {
+func (o *Object) remove(ctx context.Context, hard bool) error {
	opts := rest.Opts{
		Method: "POST",
		Path:   o.filePath(),
@@ -1593,7 +1712,7 @@ func (o *Object) Remove(ctx context.Context) error {
		NoResponse: true,
	}
-	if o.fs.opt.HardDelete {
+	if hard {
		opts.Parameters.Set("rm", "true")
	} else {
		opts.Parameters.Set("dl", "true")
@@ -1605,6 +1724,11 @@ func (o *Object) Remove(ctx context.Context) error {
	})
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.remove(ctx, o.fs.opt.HardDelete)
}
// Check the interfaces are satisfied
var (
	_ fs.Fs = (*Fs)(nil)
@@ -1615,6 +1739,7 @@ var (
	_ fs.ListRer      = (*Fs)(nil)
	_ fs.PublicLinker = (*Fs)(nil)
	_ fs.Abouter      = (*Fs)(nil)
+	_ fs.UserInfoer   = (*Fs)(nil)
	_ fs.CleanUpper   = (*Fs)(nil)
	_ fs.Object       = (*Object)(nil)
	_ fs.MimeTyper    = (*Object)(nil)


@@ -32,29 +32,29 @@ func init() {
	NewFs: NewFs,
	Options: []fs.Option{{
		Name: "endpoint",
-		Help: "The Koofr API endpoint to use",
+		Help: "The Koofr API endpoint to use.",
		Default:  "https://app.koofr.net",
		Required: true,
		Advanced: true,
	}, {
		Name: "mountid",
-		Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
+		Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
		Required: false,
		Default:  "",
		Advanced: true,
	}, {
		Name: "setmtime",
-		Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
+		Help: "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
		Default:  true,
		Required: true,
		Advanced: true,
	}, {
		Name: "user",
-		Help: "Your Koofr user name",
+		Help: "Your Koofr user name.",
		Required: true,
	}, {
		Name: "password",
-		Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
+		Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
		IsPassword: true,
		Required:   true,
	}, {
@@ -344,7 +344,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err e
		return nil, translateErrorsObject(err)
	}
	if info.Type == "dir" {
-		return nil, fs.ErrorNotAFile
+		return nil, fs.ErrorIsDir
	}
	return &Object{
		fs: f,
@@ -608,5 +608,25 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
	if err != nil {
		return "", translateErrorsDir(err)
	}
-	return linkData.ShortURL, nil
+
+	// The URL returned by the API looks like the following:
+	//
+	//     https://app.koofr.net/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6
+	//
+	// The direct URL looks like the following:
+	//
+	//     https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
+	//
+	// I am not sure about the meaning of the "path" parameter; in my experiments
+	// it is always "%2F", and omitting it or putting any other value
+	// results in 404.
+	//
+	// There is one more quirk: a direct link to a file in / returns that file,
+	// while a direct link to a file elsewhere in the hierarchy returns a zip
+	// archive with one member.
+	link := linkData.URL
+	link = strings.ReplaceAll(link, "/links", "/content/links")
+	link += "/files/get?path=%2F"
+	return link, nil
}
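The rewrite is a plain string transformation from the share URL the API returns to the direct-content URL described in the comment. Applied standalone to the example link from the comment above:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Share URL as returned by the Koofr API (example value from the comment).
	link := "https://app.koofr.net/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6"
	link = strings.ReplaceAll(link, "/links", "/content/links")
	link += "/files/get?path=%2F"
	fmt.Println(link)
	// https://app.koofr.net/content/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6/files/get?path=%2F
}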


@@ -1,13 +1,14 @@
+//go:build darwin || dragonfly || freebsd || linux
// +build darwin dragonfly freebsd linux

package local

import (
	"context"
+	"fmt"
	"os"
	"syscall"

-	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
)
@@ -19,7 +20,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
		if os.IsNotExist(err) {
			return nil, fs.ErrorDirNotFound
		}
-		return nil, errors.Wrap(err, "failed to read disk usage")
+		return nil, fmt.Errorf("failed to read disk usage: %w", err)
	}
	bs := int64(s.Bsize) // nolint: unconvert
	usage := &fs.Usage{


@@ -1,13 +1,14 @@
+//go:build windows
// +build windows

package local

import (
	"context"
+	"fmt"
	"syscall"
	"unsafe"

-	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
)
@@ -23,7 +24,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
) )
if e1 != syscall.Errno(0) { if e1 != syscall.Errno(0) {
return nil, errors.Wrap(e1, "failed to read disk usage") return nil, fmt.Errorf("failed to read disk usage: %w", e1)
} }
usage := &fs.Usage{ usage := &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used Total: fs.NewUsageValue(total), // quota of bytes that can be used


@@ -1,11 +0,0 @@
-//+build darwin
-
-package local
-
-import "github.com/rclone/rclone/lib/encoder"
-
-// This is the encoding used by the local backend for macOS
-//
-// macOS can't store invalid UTF-8, it converts them into %XX encoding
-const defaultEnc = (encoder.Base |
-	encoder.EncodeInvalidUtf8)
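These per-platform defaultEnc files can be deleted because the local backend's encoding option now defaults to encoder.OS (see the Default: encoder.OS change in local.go below), which lib/encoder resolves to the appropriate set for the build platform. A sketch that just prints the compiled-in default (assuming encoder.OS formats cleanly with %v; on macOS it should come out equivalent to the old Base | EncodeInvalidUtf8 above):

package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

func main() {
	// encoder.OS is selected per platform at build time.
	fmt.Printf("local backend default encoding: %v\n", encoder.OS)
}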


@@ -1,8 +0,0 @@
-//+build !windows,!darwin
-
-package local
-
-import "github.com/rclone/rclone/lib/encoder"
-
-// This is the encoding used by the local backend for non windows platforms
-const defaultEnc = encoder.Base


@@ -1,4 +1,5 @@
-//+build !linux
+//go:build !linux
+// +build !linux

package local


@@ -1,4 +1,5 @@
-//+build linux
+//go:build linux
+// +build linux

package local


@@ -1,3 +1,4 @@
+//go:build windows || plan9 || js
// +build windows plan9 js

package local


@@ -1,3 +1,4 @@
//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js // +build !windows,!plan9,!js
package local package local


@@ -4,6 +4,7 @@ package local
import (
	"bytes"
	"context"
+	"errors"
	"fmt"
	"io"
	"io/ioutil"
@@ -16,7 +17,6 @@ import (
	"time"
	"unicode/utf8"

-	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config"
@@ -44,11 +44,11 @@ func init() {
	CommandHelp: commandHelp,
	Options: []fs.Option{{
		Name: "nounc",
-		Help: "Disable UNC (long path names) conversion on Windows",
+		Help: "Disable UNC (long path names) conversion on Windows.",
		Advanced: runtime.GOOS != "windows",
		Examples: []fs.OptionExample{{
			Value: "true",
-			Help:  "Disables long file names",
+			Help:  "Disables long file names.",
		}},
	}, {
		Name: "copy_links",
@@ -59,7 +59,7 @@ func init() {
		Advanced: true,
	}, {
		Name: "links",
-		Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
+		Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
		Default:  false,
		NoPrefix: true,
		ShortOpt: "l",
@@ -67,6 +67,7 @@ func init() {
	}, {
		Name: "skip_links",
		Help: `Don't warn about skipped symlinks.

This flag disables warning messages on skipped symlinks or junction
points, as you explicitly acknowledge that they should be skipped.`,
		Default: false,
@@ -74,21 +75,21 @@ points, as you explicitly acknowledge that they should be skipped.`,
		Advanced: true,
	}, {
		Name: "zero_size_links",
-		Help: `Assume the Stat size of links is zero (and read them instead) (Deprecated)
+		Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).

-Rclone used to use the Stat size of links as the link size, but this fails in quite a few places
+Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:

- Windows
- On some virtual filesystems (such as LucidLink)
- Android

-So rclone now always reads the link
+So rclone now always reads the link.
`,
		Default:  false,
		Advanced: true,
	}, {
		Name: "unicode_normalization",
-		Help: `Apply unicode NFC normalization to paths and filenames
+		Help: `Apply unicode NFC normalization to paths and filenames.

This flag can be used to normalize file names into unicode NFC form
that are read from the local filesystem.
@@ -106,7 +107,7 @@ routine so this flag shouldn't normally be used.`,
		Advanced: true,
	}, {
		Name: "no_check_updated",
-		Help: `Don't check to see if the files change during upload
+		Help: `Don't check to see if the files change during upload.

Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
@@ -152,7 +153,7 @@ to override the default choice.`,
		Advanced: true,
	}, {
		Name: "case_insensitive",
-		Help: `Force the filesystem to report itself as case insensitive
+		Help: `Force the filesystem to report itself as case insensitive.

Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
@@ -161,7 +162,7 @@ to override the default choice.`,
		Advanced: true,
	}, {
		Name: "no_preallocate",
-		Help: `Disable preallocation of disk space for transferred files
+		Help: `Disable preallocation of disk space for transferred files.

Preallocation of disk space helps prevent filesystem fragmentation.
However, some virtual filesystem layers (such as Google Drive File
@@ -172,7 +173,7 @@ Use this flag to disable preallocation.`,
		Advanced: true,
	}, {
		Name: "no_sparse",
-		Help: `Disable sparse files for multi-thread downloads
+		Help: `Disable sparse files for multi-thread downloads.

On Windows platforms rclone will make sparse files when doing
multi-thread downloads. This avoids long pauses on large files where
@@ -182,7 +183,7 @@ cause disk fragmentation and can be slow to work with.`,
		Advanced: true,
	}, {
		Name: "no_set_modtime",
-		Help: `Disable setting modtime
+		Help: `Disable setting modtime.

Normally rclone updates modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when
@@ -195,7 +196,7 @@ enabled, rclone will no longer update the modtime after copying a file.`,
		Name:     config.ConfigEncoding,
		Help:     config.ConfigEncodingHelp,
		Advanced: true,
-		Default:  defaultEnc,
+		Default:  encoder.OS,
	}},
}
fs.Register(fsi)
@@ -401,7 +402,7 @@ func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, erro
	}
	if o.mode.IsDir() {
-		return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
+		return nil, fs.ErrorIsDir
	}
	return o, nil
}
@@ -431,7 +432,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	fd, err := os.Open(fsDirPath)
	if err != nil {
		isPerm := os.IsPermission(err)
-		err = errors.Wrapf(err, "failed to open directory %q", dir)
+		err = fmt.Errorf("failed to open directory %q: %w", dir, err)
		fs.Errorf(dir, "%v", err)
		if isPerm {
			_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
@@ -442,7 +443,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	defer func() {
		cerr := fd.Close()
		if cerr != nil && err == nil {
-			err = errors.Wrapf(cerr, "failed to close directory %q:", dir)
+			err = fmt.Errorf("failed to close directory %q:: %w", dir, cerr)
		}
	}()
@@ -467,8 +468,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	for _, name := range names {
		namepath := filepath.Join(fsDirPath, name)
		fi, fierr := os.Lstat(namepath)
+		if os.IsNotExist(fierr) {
+			// skip entry removed by a concurrent goroutine
+			continue
+		}
		if fierr != nil {
-			err = errors.Wrapf(err, "failed to read directory %q", namepath)
+			err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
			fs.Errorf(dir, "%v", fierr)
			_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
			continue
@@ -478,7 +483,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
		}
	}
	if err != nil {
-		return nil, errors.Wrap(err, "failed to read directory entry")
+		return nil, fmt.Errorf("failed to read directory entry: %w", err)
	}

	for _, fi := range fis {
@@ -491,7 +496,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
		fi, err = os.Stat(localPath)
		if os.IsNotExist(err) || isCircularSymlinkError(err) {
			// Skip bad symlinks and circular symlinks
-			err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
+			err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
			fs.Errorf(newRemote, "Listing error: %v", err)
			err = accounting.Stats(ctx).Error(err)
			continue
@@ -565,9 +570,8 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
	localPath := f.localPath(dir)
-	err := os.MkdirAll(localPath, 0777)
+	err := file.MkdirAll(localPath, 0777)
	if err != nil {
		return err
	}
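Mkdir (and DirMove and mkdirAll below) switch from os.MkdirAll to file.MkdirAll from rclone's lib/file package. The point of the wrapper, replacing the old syncthing FIXME, is Windows: os.MkdirAll has historically mishandled extended-length \\?\ paths, while file.MkdirAll is written to cope with them and behaves like os.MkdirAll elsewhere. A minimal sketch (the long path is a made-up example):

package main

import (
	"log"

	"github.com/rclone/rclone/lib/file"
)

func main() {
	// Hypothetical extended-length Windows path; on other platforms
	// file.MkdirAll simply delegates to the standard library.
	err := file.MkdirAll(`\\?\C:\some\very\deeply\nested\directory`, 0777)
	if err != nil {
		log.Fatal(err)
	}
}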
@@ -668,7 +672,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
		return err
	}
	if !fi.Mode().IsDir() {
-		return errors.Errorf("can't purge non directory: %q", dir)
+		return fmt.Errorf("can't purge non directory: %q", dir)
	}
	return os.RemoveAll(dir)
}
@@ -761,7 +765,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
	// Create parent of destination
	dstParentPath := filepath.Dir(dstPath)
-	err = os.MkdirAll(dstParentPath, 0777)
+	err = file.MkdirAll(dstParentPath, 0777)
	if err != nil {
		return err
	}
@@ -862,12 +866,12 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	err := o.lstat()
	var changed bool
	if err != nil {
-		if os.IsNotExist(errors.Cause(err)) {
+		if errors.Is(err, os.ErrNotExist) {
			// If file not found then we assume any accumulated
			// hashes are OK - this will error on Open
			changed = true
		} else {
-			return "", errors.Wrap(err, "hash: failed to stat")
+			return "", fmt.Errorf("hash: failed to stat: %w", err)
		}
	} else {
		o.fs.objectMetaMu.RLock()
@@ -896,16 +900,16 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
		in = readers.NewLimitedReadCloser(in, o.size)
	}
	if err != nil {
-		return "", errors.Wrap(err, "hash: failed to open")
+		return "", fmt.Errorf("hash: failed to open: %w", err)
	}
	var hashes map[hash.Type]string
	hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
	closeErr := in.Close()
	if err != nil {
-		return "", errors.Wrap(err, "hash: failed to read")
+		return "", fmt.Errorf("hash: failed to read: %w", err)
	}
	if closeErr != nil {
-		return "", errors.Wrap(closeErr, "hash: failed to close")
+		return "", fmt.Errorf("hash: failed to close: %w", closeErr)
	}
	hashValue = hashes[r]
	o.fs.objectMetaMu.Lock()
@@ -986,17 +990,17 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
	// Check if file has the same size and modTime
	fi, err := file.fd.Stat()
	if err != nil {
-		return 0, errors.Wrap(err, "can't read status of source file while transferring")
+		return 0, fmt.Errorf("can't read status of source file while transferring: %w", err)
	}
	file.o.fs.objectMetaMu.RLock()
	oldtime := file.o.modTime
	oldsize := file.o.size
	file.o.fs.objectMetaMu.RUnlock()
	if oldsize != fi.Size() {
-		return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
+		return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
	}
	if !oldtime.Equal(fi.ModTime()) {
-		return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
+		return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
	}
}
@@ -1095,7 +1099,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// mkdirAll makes all the directories needed to store the object // mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error { func (o *Object) mkdirAll() error {
dir := filepath.Dir(o.path) dir := filepath.Dir(o.path)
return os.MkdirAll(dir, 0777) return file.MkdirAll(dir, 0777)
} }
type nopWriterCloser struct { type nopWriterCloser struct {
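Note: the hunks above all apply the same migration: github.com/pkg/errors (errors.Wrap, errors.Cause, errors.Errorf) is replaced by the standard library (fmt.Errorf with %w, errors.Is), and os.MkdirAll is swapped for rclone's lib/file wrapper. A minimal standalone sketch of the error idiom — illustrative names only, not rclone code:

package main

import (
	"errors"
	"fmt"
	"os"
)

// statSize is a hypothetical helper showing the fmt.Errorf("...: %w", err)
// wrapping that replaces errors.Wrap in this diff.
func statSize(path string) (int64, error) {
	fi, err := os.Stat(path)
	if err != nil {
		// %w records err in the wrap chain, so callers can still
		// inspect the underlying cause.
		return 0, fmt.Errorf("statSize: failed to stat: %w", err)
	}
	return fi.Size(), nil
}

func main() {
	_, err := statSize("/no/such/file")
	// errors.Is walks the %w wrap chain - the replacement for the old
	// os.IsNotExist(errors.Cause(err)) idiom from pkg/errors.
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true
	fmt.Println(err)
}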


@@ -93,16 +93,16 @@ func TestSymlink(t *testing.T) {
 	file2d := fstest.NewItem("symlink.txt", "hello", modTime1)

 	// Check with no symlink flags
-	fstest.CheckItems(t, r.Flocal, file1)
-	fstest.CheckItems(t, r.Fremote)
+	r.CheckLocalItems(t, file1)
+	r.CheckRemoteItems(t)

 	// Set fs into "-L" mode
 	f.opt.FollowSymlinks = true
 	f.opt.TranslateSymlinks = false
 	f.lstat = os.Stat
-	fstest.CheckItems(t, r.Flocal, file1, file2d)
-	fstest.CheckItems(t, r.Fremote)
+	r.CheckLocalItems(t, file1, file2d)
+	r.CheckRemoteItems(t)

 	// Set fs into "-l" mode
 	f.opt.FollowSymlinks = false
@@ -111,7 +111,7 @@ func TestSymlink(t *testing.T) {
 	fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported)
 	if haveLChtimes {
-		fstest.CheckItems(t, r.Flocal, file1, file2)
+		r.CheckLocalItems(t, file1, file2)
 	}

 	// Create a symlink
@@ -119,7 +119,7 @@ func TestSymlink(t *testing.T) {
 	file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
 	fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
 	if haveLChtimes {
-		fstest.CheckItems(t, r.Flocal, file1, file2, file3)
+		r.CheckLocalItems(t, file1, file2, file3)
 	}

 	// Check it got the correct contents
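Note: the test updates are a mechanical rename: package-level fstest.CheckItems(t, r.Flocal, ...) and fstest.CheckItems(t, r.Fremote) calls become methods on the run object, which already knows which fs is local and which is remote. Judging only from the call sites above, the helpers behave like thin wrappers along these lines — an assumed sketch, not the actual fstest source:

package fstest // illustrative placement; the real code may differ

import "testing"

// CheckLocalItems asserts that the local fs contains exactly items.
// Assumed to wrap CheckItems, based on the call sites in this diff.
func (r *Run) CheckLocalItems(t *testing.T, items ...Item) {
	CheckItems(t, r.Flocal, items...)
}

// CheckRemoteItems asserts that the remote fs contains exactly items.
func (r *Run) CheckRemoteItems(t *testing.T, items ...Item) {
	CheckItems(t, r.Fremote, items...)
}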


@@ -11,7 +11,8 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
-		RemoteName: "",
-		NilObject:  (*local.Object)(nil),
+		RemoteName:  "",
+		NilObject:   (*local.Object)(nil),
+		QuickTestOK: true,
 	})
 }
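Note: only QuickTestOK is new here; the two existing fields show up as changed lines because gofmt aligns struct-literal values to the longest key. A runnable sketch of that effect (Opt here is an illustrative stand-in, not the real fstests.Opt definition):

package main

import "fmt"

// Opt mirrors the shape of the options struct in the diff above.
type Opt struct {
	RemoteName  string
	NilObject   interface{}
	QuickTestOK bool
}

func main() {
	// Adding the longer QuickTestOK key makes gofmt re-align the values
	// of the neighbouring fields, so their lines change too.
	opt := Opt{
		RemoteName:  "",
		NilObject:   nil,
		QuickTestOK: true,
	}
	fmt.Printf("%+v\n", opt)
}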


@@ -1,5 +1,6 @@
 // Device reading functions
+//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
 // +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris

 package local


@@ -1,5 +1,6 @@
 // Device reading functions
+//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
 // +build darwin dragonfly freebsd linux netbsd openbsd solaris

 package local


@@ -1,4 +1,5 @@
-//+build !windows
+//go:build !windows
+// +build !windows

 package local


@@ -1,4 +1,5 @@
-//+build windows
+//go:build windows
+// +build windows

 package local


@@ -1,3 +1,4 @@
+//go:build !windows && !plan9 && !js
 // +build !windows,!plan9,!js

 package local


@@ -1,3 +1,4 @@
+//go:build windows || plan9 || js
 // +build windows plan9 js

 package local
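Note: these remaining files all get the same treatment: a //go:build line (the constraint syntax introduced in Go 1.17) is added above the legacy // +build line, and gofmt keeps the two in sync so older toolchains still apply the constraint. The two forms express the same condition; //go:build uses explicit boolean operators where // +build uses commas for AND and spaces for OR. For example:

// The two constraint lines below are equivalent:
//   //go:build uses the && / || / ! operators
//   // +build uses , for AND and spaces for OR

//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js

package local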

Some files were not shown because too many files have changed in this diff.