mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits


599 Commits

Author SHA1 Message Date
Nick Craig-Wood
f9354fff2f Version v1.58.0 2022-03-18 12:29:54 +00:00
Nick Craig-Wood
ff1f173fc2 build: add bisync.md to docs builder and fix missing tardigrade.md stub 2022-03-18 11:22:23 +00:00
Nick Craig-Wood
f8073a7b63 build: ensure the Go version used for the build is always up to date #6020 2022-03-17 17:14:50 +00:00
Nick Craig-Wood
807f1cedaa hasher: fix crash on object not found
Before this fix `NewObject` could return a wrapped `fs.Object(nil)`
which caused a crash. This was caused by `wrapObject` returning a
`nil` `*Object` which was cast into an `fs.Object`.

This changes the interface of `wrapObject` so it returns an
`fs.Object` instead of a `*Object` and an error which must be checked.
This forces the callers to return a `nil` object rather than an
`fs.Object(nil)`.

See: https://forum.rclone.org/t/panic-in-hasher-when-mounting-with-vfs-cache-and-not-synced-data-in-the-cache/29697/11
2022-03-16 11:30:26 +00:00
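The Go pitfall behind this crash is that an interface value holding a typed nil pointer is itself non-nil. A minimal standalone sketch of the problem (illustrative only, not the hasher code):

    package main

    import "fmt"

    // Object stands in for a concrete backend object type.
    type Object struct{}

    // Describer stands in for an interface such as fs.Object.
    type Describer interface{ Describe() string }

    func (o *Object) Describe() string { return "object" }

    // wrapTypedNil returns a nil *Object which the caller then
    // stores in an interface value.
    func wrapTypedNil() *Object { return nil }

    func main() {
        var d Describer = wrapTypedNil()
        // The interface is NOT nil even though it wraps a nil pointer,
        // so a caller checking `d != nil` will go on to use it and can
        // crash as soon as a method dereferences the nil receiver.
        fmt.Println(d == nil) // prints false
    }

Returning the interface type (with an explicit nil on error) from the wrapping function avoids handing callers such a non-nil wrapper around nothing, which is the shape of the fix described above.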
Nick Craig-Wood
bf9c68c88a storj: implement server side Move 2022-03-14 15:44:56 +00:00
Nick Craig-Wood
189cba0fbe s3: add other regions for Lyve and correct Provider name 2022-03-14 15:43:35 +00:00
Nick Craig-Wood
69f726f16c Add Nil Alexandrov to contributors 2022-03-14 15:43:35 +00:00
Nil Alexandrov
65652f7a75 Add Akamai Netstorage as a new backend. 2022-03-09 12:42:22 +00:00
Nil Alexandrov
47f9ab2f56 lib/rest: add support for setting trailers 2022-03-09 12:42:22 +00:00
Nick Craig-Wood
5dd51e6149 union: fix deadlock when one part of a multi-upload fails
Before this fix, rclone would deadlock when uploading two files at
once, if one errored. This caused the other file to block in the multi
reader and never complete.

This fix drains the input buffer on error which allows the other
upload to complete.

See: https://forum.rclone.org/t/union-with-create-policy-all-copy-stuck-when-first-union-fails/29601
2022-03-09 11:30:55 +00:00
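A minimal sketch of the drain-on-error idea, with a hypothetical upload function standing in for the real one: consuming the rest of the input on failure keeps whatever is feeding a shared multi reader from blocking forever.

    package main

    import (
        "errors"
        "io"
        "strings"
    )

    // upload is a hypothetical stand-in that fails part way through reading.
    func upload(r io.Reader) error {
        buf := make([]byte, 4)
        if _, err := r.Read(buf); err != nil {
            return err
        }
        return errors.New("upstream rejected the upload")
    }

    func uploadAndDrain(r io.Reader) error {
        err := upload(r)
        if err != nil {
            // Drain the remaining input so whatever is feeding this
            // reader (e.g. an io.Pipe shared with another upload) does
            // not block forever on its next write.
            _, _ = io.Copy(io.Discard, r)
        }
        return err
    }

    func main() {
        _ = uploadAndDrain(strings.NewReader("some data to upload"))
    }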
Nick Craig-Wood
6a6d254a9f s3: add support for Seagate Lyve Cloud storage 2022-03-09 11:30:55 +00:00
jaKa
fd453f2c7b koofr: renamed digistorage to exclude the romania part. 2022-03-08 22:39:23 +00:00
jaKa
5d06a82c5d koofr: add digistorage service as a koofr provider. 2022-03-08 10:36:18 +00:00
Nick Craig-Wood
847868b4ba ftp: hard fork github.com/jlaffaye/ftp to fix go get
Having a replace directive in go.mod causes "go get
github.com/rclone/rclone" to fail as it discussed in this Go issue:
https://github.com/golang/go/issues/44840

This is apparently how the Go team want go.mod to work, so this commit
hard forks github.com/jlaffaye/ftp into github.com/rclone/ftp so we
can remove the `replace` directive from the go.mod file.

Fixes #5810
2022-03-07 09:55:49 +00:00
Ivan Andreev
38ca178cf3 mailru: fix int32 overflow on arm32 - fixes #6003 2022-03-06 13:33:57 +00:00
Nick Craig-Wood
9427d22f99 Add ctrl-q to contributors 2022-03-06 13:33:26 +00:00
ctrl-q
7b1428a498 onedrive: Do not retry on 400 pathIsTooLong 2022-03-06 13:05:05 +00:00
Nick Craig-Wood
ec72432cec vfs: fix failed to _ensure cache internal error: downloaders is nil error
This error was caused by renaming an open file.

When the file was renamed in the cache, the downloaders were cleared,
however the downloaders were not re-opened when needed again, instead
this error was generated.

This fix re-opens the downloaders if they have been closed by renaming
the file.

Fixes #5984
2022-03-03 17:43:29 +00:00
Nick Craig-Wood
2339172df2 pcloud: fix pre-1970 time stamps - fixes #5917
Before this change rclone sent pre-1970 timestamps as negative
numbers. pCloud ignores these and sets them to today's date.

This change sends the timestamps as unsigned 64 bit integers (which is
how the binary protocol sends them) and pCloud accepts the (actually
negative) timestamp like this.
2022-03-03 17:18:40 +00:00
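For illustration, a pre-1970 time has a negative Unix timestamp as an int64, and reinterpreting it as a uint64 yields the two's-complement bit pattern that the binary protocol carries (a standalone sketch, not pCloud protocol code):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        ts := time.Date(1965, 6, 1, 0, 0, 0, 0, time.UTC).Unix()
        fmt.Println(ts)         // negative: seconds before the Unix epoch
        fmt.Println(uint64(ts)) // the same bits viewed as an unsigned 64 bit value
    }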
Nick Craig-Wood
268b808bf8 filter: add {{ regexp }} syntax to pattern matches - fixes #4074
There has been a desire from more advanced rclone users to have regexp
filtering as well as the glob filtering.

This patch adds regexp filtering using this syntax `{{ regexp }}`
which is currently a syntax error, so is backwards compatible.

This means regexps can be used everywhere globs can be used, and that
they also can be mixed with globs in the same pattern, eg `*.{{jpe?g}}`
2022-03-03 17:16:28 +00:00
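For illustration only (this is not rclone's filter implementation): the text inside `{{ }}` is an ordinary Go regular expression, so the `jpe?g` part of the example above behaves roughly like the following, where the regexp is anchored to the extension by hand to stand in for the surrounding `*.` glob:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        re := regexp.MustCompile(`\.jpe?g$`)
        for _, name := range []string{"photo.jpg", "photo.jpeg", "photo.png"} {
            fmt.Println(name, re.MatchString(name))
        }
    }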
Nick Craig-Wood
74898bac3b build: add windows/arm64 build - NB this does not support mount yet #5828 2022-03-03 17:13:32 +00:00
Nick Craig-Wood
e0fbca02d4 compress: fix memory leak - fixes #6013
Before this change we forgot to close the compressor when checking to
see if an object was compressible.
2022-03-03 17:10:21 +00:00
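The general shape of the fix, sketched with the standard library gzip writer rather than the backend's actual compression code: the compressor must be closed on every path of the compressibility probe, otherwise its buffers are never released.

    package main

    import (
        "compress/gzip"
        "fmt"
        "io"
        "strings"
    )

    // countingWriter records how many compressed bytes pass through it.
    type countingWriter struct{ n int64 }

    func (w *countingWriter) Write(p []byte) (int, error) {
        w.n += int64(len(p))
        return len(p), nil
    }

    // compressedSize compresses the input and reports the compressed size,
    // making sure the gzip writer is closed even if the copy fails.
    func compressedSize(r io.Reader) (int64, error) {
        cw := &countingWriter{}
        gz := gzip.NewWriter(cw)
        defer gz.Close() // the step that was missing and leaked memory
        if _, err := io.Copy(gz, r); err != nil {
            return 0, err
        }
        if err := gz.Close(); err != nil { // flush so cw.n is complete
            return 0, err
        }
        return cw.n, nil
    }

    func main() {
        n, _ := compressedSize(strings.NewReader("hello hello hello hello"))
        fmt.Println("compressed size:", n)
    }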
Nick Craig-Wood
21355b4208 sync: Fix --max-duration so it doesn't retry when the duration is exceeded
Before this change, if the --max-duration limit was reached then
rclone would retry the sync as a fatal error wasn't raised.

This checks the deadline and raises a fatal error if necessary at the
end of the sync.

Fixes #6002
2022-03-03 17:08:16 +00:00
Nick Craig-Wood
251b84ff2c sftp: fix unnecessary seeking when uploading and downloading files
This stops the SFTP library issuing out of order writes which fixes
the problems uploading to `serve sftp` from the `sftp` backend.

This was fixed upstream in this pull request: https://github.com/pkg/sftp/pull/482

Fixes #5806
2022-03-03 17:02:35 +00:00
Nick Craig-Wood
537b62917f s3: add --s3-use-multipart-etag provider quirk #5993
Before this change the new multipart upload ETag checking code was
failing in the integration tests with Alibaba OSS.

Apparently Alibaba calculate the ETag in a different way to AWS.

This introduces a new provider quirk with a flag to disable the
checking of the ETag for multipart uploads.

Multipart ETag checking has been enabled for all providers that we can
test and that work, and left disabled for the others.
2022-03-01 16:36:39 +00:00
Nick Craig-Wood
71a784cfa2 compress: fix crash if metadata upload failed - fixes #5994
Before this change the backend attempted to delete a nil object if
the metadata upload failed.
2022-02-28 19:47:52 +00:00
Nick Craig-Wood
8ee0fe9863 serve docker: disable linux tests in CI as they are locking up regularly 2022-02-28 18:01:47 +00:00
Nick Craig-Wood
8f164e4df5 s3: Use the ETag on multipart transfers to verify the transfer was OK
Before this, rclone ignored the ETag on multipart uploads, which missed
an opportunity for a whole file integrity check.

This adds that check which means that we now check even harder that
multipart uploads have arrived properly.

See #5993
2022-02-25 16:19:03 +00:00
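For reference, the commonly documented way AWS derives a multipart ETag is the MD5 of the concatenated binary MD5s of the parts, followed by a dash and the part count; a short sketch of that calculation (other providers, as noted for #5993 above, compute it differently):

    package main

    import (
        "crypto/md5"
        "fmt"
    )

    // multipartETag computes an AWS-style multipart ETag: the MD5 of the
    // concatenated per-part MD5s, suffixed with the number of parts.
    func multipartETag(parts [][]byte) string {
        all := md5.New()
        for _, part := range parts {
            partSum := md5.Sum(part)
            all.Write(partSum[:])
        }
        return fmt.Sprintf("%x-%d", all.Sum(nil), len(parts))
    }

    func main() {
        parts := [][]byte{[]byte("part one"), []byte("part two")}
        fmt.Println(multipartETag(parts))
    }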
Nick Craig-Wood
06ecc6511b drive: when using a link type --drive-export-formats show all doc types
Before this change we always hid unexportable document types (eg
Google maps).

After this change, if using --drive-export-formats
url/desktop/link.html/webloc we will show links for all documents
regardless of whether they are exportable, as the links to them work
either way.

See: https://forum.rclone.org/t/rclone-mount-for-google-drive-does-not-show-as-web-links-the-google-documents-of-the-google-my-map-gmap-type/29415
2022-02-25 16:08:11 +00:00
Nick Craig-Wood
3529bdec9b sftp: update docs on how to create known_hosts file
This also removes the note on the limitation that only one entry per
host is allowed in the file as it works with many entries provided
they have different key types.

See: https://forum.rclone.org/t/rclone-fails-ssh-handshakes-with-rsync-nets-sftp-when-a-known-hosts-file-is-specified/29206/
2022-02-25 16:08:11 +00:00
partev
486b43f8c7 doc: fix a typo
"and this it may require you to unblock it temporarily" -> "and it may require you to unblock it temporarily"
2022-02-22 21:05:05 +00:00
Nick Craig-Wood
89f0e4df80 swift: fix about so it shows info about the current container only
Before this change `rclone about swift:container` would show aggregate
info about all the containers, not just the one in use.

This causes a problem if container listing is disabled (for example in
the Blomp service).

This fix makes `rclone about swift:container` show only the info about
the given `container`. If aggregate info about all the containers is
required then use `rclone about swift:`.

See: https://forum.rclone.org/t/rclone-mount-blomp-problem/29151/18
2022-02-22 12:55:57 +00:00
Nick Craig-Wood
399fb5b7fb Add Vincent Murphy to contributors 2022-02-22 12:55:57 +00:00
Vincent Murphy
19f1ed949c docs: Fix broken test_proxy.py link 2022-02-22 12:26:17 +00:00
Nick Craig-Wood
d3a1001094 drive: add --drive-skip-dangling-shortcuts flag - fixes #5949
This flag enables dangling shortcuts to be skipped without an error.
2022-02-22 12:22:21 +00:00
Nick Craig-Wood
dc7e3ea1e3 drive,gcs,googlephotos: disable OAuth OOB flow (copy a token) due to google deprecation
Before this change, rclone supported authorizing for remote systems by
going to a URL and cutting and pasting a token from Google. This is
known as the OAuth out-of-band (oob) flow.

This, while very convenient for users, has been shown to be insecure
and has been deprecated by Google.

https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html#disallowed-oob

> OAuth out-of-band (OOB) is a legacy flow developed to support native
> clients which do not have a redirect URI like web apps to accept the
> credentials after a user approves an OAuth consent request. The OOB
> flow poses a remote phishing risk and clients must migrate to an
> alternative method to protect against this vulnerability. New
> clients will be unable to use this flow starting on Feb 28, 2022.

This change disables that flow, and forces the user to use the
redirect URL flow. (This is the flow used already for local configs.)

In practice this will mean that instead of cutting and pasting a token
for remote config, it will be necessary to run "rclone authorize"
instead. This is how all the other OAuth backends work so it is a well
tested code path.

Fixes #6000
2022-02-18 12:46:30 +00:00
Nick Craig-Wood
f22b703a51 storj: rename tardigrade backend to storj backend #5616
This adds an alias for backwards compatibility and leaves a stub
documentation page to redirect people to the new documentation.
2022-02-11 11:04:15 +00:00
Nick Craig-Wood
c40129d610 fs: allow backends to have aliases #5616
This allows a backend to have multiple aliases. These aliases are
hidden from `rclone config` and the command line flags are hidden from
the user. However the flags, environment variables and config for the
alias will work just fine.
2022-02-11 11:04:15 +00:00
Nick Craig-Wood
8dc93f1792 Add Márton Elek to contributors 2022-02-11 11:04:03 +00:00
Nick Craig-Wood
f4c40bf79d mount: add --devname to set the device name sent to FUSE for mount display
Before this change, the device name was always the remote:path rclone
was configured with. However this can contain sensitive information
and it appears in the `mount` output, so `--devname` allows the user
to configure it.

See: https://forum.rclone.org/t/rclone-mount-blomp-problem/29151/11
2022-02-09 11:56:43 +00:00
Nick Craig-Wood
9cc50a614b s3: add note about Storj provider bug and workaround
See: https://github.com/storj/gateway-mt/issues/39
2022-02-08 11:40:29 +00:00
Elek, Márton
bcb07a67f6 tardigrade: update docs to explain differences between s3 and this backend
Co-authored-by: Caleb Case <calebcase@gmail.com>
2022-02-08 11:40:29 +00:00
Márton Elek
25ea04f1db s3: add specific provider for Storj Shared gateways
- unsupported features (Copy) are turned off for Storj
- enable urlEncodedListing for Storj provider
- set chunksize to 64Mb
2022-02-08 11:40:29 +00:00
Nick Craig-Wood
06ffd4882d onedrive: add --onedrive-root-folder-id flag #5948
This is to allow navigating to difficult-to-find folders in OneDrive.
2022-02-07 12:29:36 +00:00
Nick Craig-Wood
19a5e1d63b docs: document --disable-http2 #5253 2022-02-07 12:29:36 +00:00
Nick Craig-Wood
ec88b66dad Add Abhiraj to contributors 2022-02-07 12:29:36 +00:00
Abhiraj
aa2d7f00c2 drive: added --drive-copy-shortcut-content - fixes #4604 2022-02-04 11:37:58 +00:00
Nick Craig-Wood
3e125443aa build: fix ARM architecture version in .deb packages after nfpm change
Fixes #5973
2022-02-03 11:24:06 +00:00
Nick Craig-Wood
3c271b8b1e Add Eng Zer Jun to contributors 2022-02-03 11:24:06 +00:00
Nick Craig-Wood
6d92ba2c6c Add viveknathani to contributors 2022-02-03 11:24:06 +00:00
albertony
c26dc69e1b docs/jottacloud: add note that mime types are not available with --fast-list 2022-02-02 13:12:50 +01:00
albertony
b0de0b4609 docs: include all commands in online help top menu drop-down 2022-02-01 20:40:50 +01:00
albertony
f54641511a librclone: add support for mount commands
Fixes #5661
2022-02-01 19:29:36 +01:00
Eng Zer Jun
8cf76f5e11 test: use T.TempDir to create temporary test directory
The directory created by `T.TempDir` is automatically removed when the
test and all its subtests complete.

Reference: https://pkg.go.dev/testing#T.TempDir
Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2022-02-01 11:47:04 +00:00
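A minimal example of the pattern with the standard testing API; the file name here is arbitrary:

    package example

    import (
        "os"
        "path/filepath"
        "testing"
    )

    func TestWritesToTempDir(t *testing.T) {
        // t.TempDir creates a fresh directory and registers a cleanup
        // that removes it when the test and its subtests complete.
        dir := t.TempDir()
        name := filepath.Join(dir, "file.txt")
        if err := os.WriteFile(name, []byte("hello"), 0o600); err != nil {
            t.Fatal(err)
        }
    }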
viveknathani
18c24014da docs/content: describe mandatory fields for drive
Making a client-id for Google Drive requires you to add two more fields
besides the already documented "Application name" field. This commit
documents what should be written for those two fields.

Fixes #5967
2022-02-01 11:42:12 +00:00
Nick Craig-Wood
0ae39bda8d docs: fix and reword --update docs
After discussion on the forum with @bandwidth, this rewords the
--update docs to be correct and easier to understand.

See: https://forum.rclone.org/t/help-understanding-update/28937
2022-02-01 11:07:51 +00:00
Nick Craig-Wood
051685baa1 s3: fix multipart upload with --no-head flag - Fixes #5956
Before this change a multipart upload with the --no-head flag returned
the MD5SUM as a base64 string rather than a Hex string as the rest of
rclone was expecting.
2022-01-29 12:48:51 +00:00
albertony
07f53aebdc touch: fix issue where directory is created instead of file
Detected on ftp, sftp and Dropbox backends.

Fixes #5952
2022-01-28 20:29:12 +01:00
albertony
bd6d36b3f6 docs: improve standard list of properties for options 2022-01-28 19:43:51 +01:00
Nick Craig-Wood
b168479429 gcs: add missing regions - fixes #5955 2022-01-28 12:34:13 +00:00
Nick Craig-Wood
b447b0cd78 build: upgrade actions runner macos-11 to fix macOS build problems #5951 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
4bd2386632 build: don't specify macos SDK any more as default is good enough #5951
This fixes the build, in particular the error:

    Failed to run ["xcrun" "--sdk" "macosx11.1" "--show-sdk-path"]: exit status 1
2022-01-27 17:33:04 +00:00
Nick Craig-Wood
83b6b62c1b build: disable cmount tests under macOS and the CI since they are locking up
This fixes #5951 and allows the macOS builds to run again

See #5960 for more info.
2022-01-27 17:33:04 +00:00
Nick Craig-Wood
5826cc9d9e Add Paulo Martins to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
252432ae54 Add Gourav T to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
8821629333 Add Isaac Levy to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
a2092a8faf Add Vanessasaurus to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
2b6f4241b4 Add Alain Nussbaumer to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
e3dd16d490 Add Charlie Jiang to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
9e1fd923f6 Add Yunhai Luo to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
3684789858 Add Koopa to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
1ac1dd428a Add Niels van de Weem to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
65dbd29c22 Add Kim to contributors 2022-01-27 17:33:04 +00:00
albertony
164774d7e1 Add Shmz Ozggrn to contributors 2022-01-27 09:43:42 +01:00
Shmz Ozggrn
507020f408 docs: Use Adaptive Logo in README 2022-01-27 09:35:36 +01:00
albertony
a667e03fc9 http: improved recognition of url pointing to a single file - fixes #5929 2022-01-26 11:41:01 +01:00
albertony
1045344943 http: status string already includes the status code 2022-01-26 11:41:01 +01:00
albertony
5e469db420 docs/http: fix list layout in --http-no-head help
Existing help text ended with a list, but then auto-generated list items
Config, Env Var, Type and Default would be included in the same list.
2022-01-26 11:41:01 +01:00
albertony
946e84d194 http: use string contains instead of index 2022-01-26 11:41:01 +01:00
albertony
162aba60eb http: error strings should not be capitalized 2022-01-26 11:41:01 +01:00
albertony
d8a874c32b Make http tests line ending agnostic 2022-01-26 11:41:01 +01:00
albertony
9c451d9ac6 Fix linting errors 2022-01-26 00:02:17 +01:00
albertony
8f3f24672c docs/serve: move help for template option into separate section 2022-01-25 18:19:21 +01:00
Paulo Martins
0eb7b716d9 s3: document Content-MD5 workaround for object-lock enabled buckets - Fixes #5765 2022-01-25 16:10:57 +00:00
Gourav T
ee9684e60f fichier: implemented about functionality 2022-01-25 15:53:58 +00:00
negative0
e0cbe413e1 rc: Allow user to disable authentication for web gui 2022-01-25 15:52:30 +00:00
albertony
2523dd6220 version: report correct friendly-name for windows 10/11 versions after 2004
Until Windows 10 version 2004 (May 2020) this can be found from registry entry
ReleaseID, after that we must use entry DisplayVersion (ReleaseId is stuck at 2009).
Source: https://ss64.com/nt/ver.html
2022-01-24 21:27:42 +01:00
albertony
c504d97017 config: fix display of config choices with empty help text 2022-01-18 20:17:57 +01:00
albertony
b783f09fc6 config: show default and example values in correct input syntax instead of quoted and escaped golang string syntax
See #5551
2022-01-16 14:57:38 +01:00
albertony
a301478a13 config: improved punctuation in initial config prompt 2022-01-16 14:57:38 +01:00
albertony
63b450a2a5 config: minor improvement of help text for encoding option
See #5551
2022-01-16 14:57:38 +01:00
albertony
843b77aaaa docs/ftp: improved default value description of port and username options
See #5551
2022-01-16 14:57:38 +01:00
albertony
3641727edb config: fix issue where required password options had to be re-entered when editing existing remote
See #5551
2022-01-16 14:57:38 +01:00
albertony
38e2f835ed config: fix handling of default, exclusive and required properties of multiple-choice options
Previously an empty input (just pressing enter) was only allowed for multiple-choice
options that did not have the Exclusive property set. With this change the existing
Required property is introduced into the multiple choice handling, so that one can have
Exclusive and Required options where only a value from the list is allowed, and one can
have Exclusive but not Required options where an empty value is accepted but any
non-empty value must still be matching an item from the list.

Fixes #5549

See #5551
2022-01-16 14:57:38 +01:00
albertony
bd4bbed592 config: remove explicit setting of required property to true for options with a default value
See #5551
2022-01-16 14:57:38 +01:00
albertony
994b501188 config: remove explicit setting of required property to its default value false
See #5551
2022-01-16 14:57:38 +01:00
albertony
dfa9381814 docs/jottacloud: correct reference to temp-dir 2022-01-16 14:34:15 +01:00
albertony
2a85feda4b docs/jottacloud: add note about upload only being supported on jotta device 2022-01-16 14:34:15 +01:00
albertony
ad46af9168 docs/librclone: note that adding -ldflags -s to the build command will reduce size of library file 2022-01-16 14:32:01 +01:00
albertony
2fed02211c docs/librclone: document use from C/C++ on Windows 2022-01-16 14:11:56 +01:00
albertony
237daa8aaf dedupe: add quit as a choice in interactive mode
Fixes #5881
2022-01-14 19:57:48 +01:00
albertony
8aeca6c033 docs: align menu items when icons have different sizes 2022-01-14 17:39:27 +00:00
albertony
fd82876086 librclone: allow empty string or null input instead of empty json object 2022-01-14 17:37:13 +00:00
Isaac Levy
be1a668e95 onedrive: minor optimization of quickxorhash
This patch avoids creating a new slice header in favour of a for loop.

This saves a few instructions!
2022-01-14 17:30:56 +00:00
Vanessasaurus
9d4eab32d8 cmd: fix broken example link in help.go
This link appears to be broken, so here is another reference to (I think) the same file that provides a good example of cobra. We could also do the current commit 8312004f41/cli/cobra.go although it might be better to maintain an up-to-date example.
2022-01-13 16:26:19 +00:00
Alain Nussbaumer
b4ba7b69b8 dlna: change icons to the newest ones. 2022-01-13 16:23:24 +00:00
albertony
deef659aef Add Bumsu Hyeon to contributors 2022-01-13 13:25:20 +01:00
Bumsu Hyeon
4b99e84242 vfs/cache: fix handling of special characters in file names (#5875) 2022-01-13 13:23:25 +01:00
albertony
06bdf7c64c Add Lu Wang to contributors 2022-01-12 21:33:35 +01:00
Lu Wang
e1225b5729 docs/s3: fixed max-age example 2022-01-12 21:31:54 +01:00
albertony
871cc2f62d docs: fix links to rc sections 2022-01-12 19:51:26 +01:00
Charlie Jiang
bc23bf11db onedrive: add config option for oauth scope Sites.Read.All (#5883) 2022-01-10 21:28:19 +08:00
albertony
b55575e622 docs: fix typo 2022-01-03 18:46:40 +01:00
albertony
328f0e7135 docs: fix links to rc debug commands 2021-12-30 21:52:34 +01:00
albertony
a52814eed9 docs: fix links to rc data types section 2021-12-30 20:46:39 +01:00
albertony
071a9e882d docs: capitalization of flag usage strings 2021-12-30 14:07:24 +01:00
albertony
4e2ca3330c tree: remove obsolete --human replaced by global --human-readable - fixes #5868 2021-12-21 20:17:00 +01:00
Yunhai Luo
408d9f3e7a s3: Add GLACIER_IR storage class 2021-12-03 14:46:45 +00:00
Koopa
0681a5c86a lib/rest: process HTML entities within XML
MEGAcmd currently includes escaped HTML4 entities in its XML messages.
This behavior deviates from the XML standard, but currently it prevents
rclone from being able to use the remote.
2021-12-01 16:31:43 +00:00
Niels van de Weem
df09c3f555 pcloud: add support for recursive list 2021-12-01 15:58:44 +00:00
Kim
c41814fd2d backend:jottacloud change api used by ListR ( --fast-list ) 2021-12-01 14:21:37 +01:00
Nick Craig-Wood
c2557cc432 azureblob: fix crash with SAS URL and no container - fixes #5820
Before this change attempting NewObject on a SAS URL's root would
crash the Azure SDK.

This change detects this case using the code from this previous fix

f7404f52e7 azureblob: fix crash when listing outside a SAS URL's root - fixes #4851

and returns an object not found error instead.

It also prevents things being uploaded to the root of the SAS URL
which also crashes the Azure SDK.
2021-11-27 16:18:18 +00:00
Nick Craig-Wood
3425726c50 oauthutil: fix crash when webrowser requests /robots.txt - fixes #5836
Before this change the oauth webserver would crash if it received a
request to /robots.txt.

This patch makes it ignore (with a 404 error) any paths it isn't
expecting.
2021-11-25 12:12:14 +00:00
Nick Craig-Wood
46175a22d8 Add Logeshwaran Murugesan to contributors 2021-11-25 12:11:47 +00:00
Logeshwaran Murugesan
bcf0e15ad7 Simplify content length processing in s3 with download url 2021-11-25 12:03:14 +00:00
Nick Craig-Wood
b91c349cd5 local: fix hash invalidation which caused errors with local crypt mount
Before this fix, if a file was updated but kept the same length and
timestamp, then the local backend would return the wrong (cached)
hashes for the object.

This happens regularly on a crypted local disk mount when the VFS
thinks files have been changed but actually their contents are
identical to what was written previously. This is because when files are
uploaded their nonce changes, so the contents of the file change, but
the timestamp and size remain the same because the underlying file didn't
actually change.

This causes errors like this:

    ERROR: file: Failed to copy: corrupted on transfer: md5 crypted
    hash differ "X" vs "Y"

This turned out to be because the local backend wasn't clearing its
cache of hashes when the file was updated.

This fix clears the hash cache for Update and Remove.

It also puts a src and destination in the crypt message to make future
debugging easier.

Fixes #4031
2021-11-24 12:09:34 +00:00
Nick Craig-Wood
d252816706 vfs: add vfs/stats remote control to show statistics - fixes #5816 2021-11-23 18:00:21 +00:00
Nick Craig-Wood
729117af68 Add GGG KILLER to contributors 2021-11-23 18:00:21 +00:00
GGG KILLER
cd4d8d55ec docs: add a note about the B2 download_url format
Currently the B2 docs don't specify which format the download_url
setting should have, and if you input it wrong, there is nothing
in the verbose logs or anywhere else that can let you know that.
2021-11-23 17:57:34 +00:00
Nick Craig-Wood
f26abc89a6 union: fix treatment of remotes with // in
See: https://forum.rclone.org/t/connection-string-with-union-backend-and-a-lot-of-quotes/27577
2021-11-23 17:41:12 +00:00
lindwurm
b5abbe819f s3: Add Wasabi AP Northeast 2 endpoint info
* Wasabi has started to provide an AP Northeast 2 (Osaka) endpoint, so add it to the list
* Rename ap-northeast-1 from "AP Northeast" to "AP Northeast 1 (Tokyo)"

Signed-off-by: lindwurm <lindwurm.q@gmail.com>
2021-11-22 18:02:57 +00:00
Nick Craig-Wood
a351484997 sftp: fix timeout on hashing large files by sending keepalives
Before this fix the SFTP sessions could time out when doing hashes if
they took longer than the --timeout parameter.

This patch sends keepalive packets every minute while a shell command
is running to keep the connection open.

See: https://forum.rclone.org/t/rclone-check-over-sftp-failure-to-calculate-md5-hash-for-large-files/27487
2021-11-22 15:26:29 +00:00
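A sketch of the keepalive idea using golang.org/x/crypto/ssh; the connection setup is omitted and the interval and helper name are illustrative, not rclone's actual code:

    package sshutil

    import (
        "time"

        "golang.org/x/crypto/ssh"
    )

    // keepAlive sends an OpenSSH keepalive request at each interval until
    // done is closed, preventing idle timeouts while a long remote command
    // (such as hashing a large file) is running.
    func keepAlive(client *ssh.Client, interval time.Duration, done <-chan struct{}) {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                _, _, _ = client.SendRequest("keepalive@openssh.com", true, nil)
            case <-done:
                return
            }
        }
    }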
Nick Craig-Wood
099eff8891 sftp: refactor so we only have one way of running remote commands
This also returns errors from running ssh Hash commands which we
didn't do before.
2021-11-22 15:26:29 +00:00
albertony
c4cb167d4a Add rsapkf and Will Holtz to contributors 2021-11-21 19:26:05 +01:00
Will Holtz
38e100ab19 docs/config: more explicit doc for config create --all with params 2021-11-21 19:22:19 +01:00
rsapkf
db95a0d6c3 docs/pcloud: fix typo 2021-11-21 19:16:19 +01:00
Nick Craig-Wood
df07964db3 azureblob: raise --azureblob-upload-concurrency to 16 by default
After speed testing it was discovered that upload speed goes up pretty
much linearly with upload concurrency. This patch changes the default
from 4 to 16 which means that rclone will use 16 * 4M = 64M per
transfer which is OK even for low memory devices.

This adds a note that performance may be increased by increasing
upload concurrency.

See: https://forum.rclone.org/t/performance-of-rclone-vs-azcopy/27437/9
2021-11-18 16:09:02 +00:00
Nick Craig-Wood
fbc4c4ad9a azureblob: remove 100MB upper limit on chunk_size as it is no longer needed 2021-11-18 16:09:02 +00:00
Nick Craig-Wood
4454b3e1ae azureblob: implement --azureblob-upload-concurrency parameter to speed uploads
See: https://forum.rclone.org/t/performance-of-rclone-vs-azcopy/27437
2021-11-18 16:08:57 +00:00
Nick Craig-Wood
f9321fccbb Add deinferno to contributors 2021-11-18 15:51:45 +00:00
Ole Frost
3c2252b7c0 fs/operations: add server-side moves to stats
Fixes #5430
2021-11-18 12:20:56 +00:00
Cnly
51c952654c fstests: treat accountUpgradeRequired as success for OneDrive PublicLink 2021-11-17 17:35:17 +00:00
deinferno
80e47be65f yandex: add permanent deletion support 2021-11-17 16:57:41 +00:00
Michał Matczuk
38dc3e93ee fshttp: add prometheus metrics for HTTP status code
This patch adds rclone_http_status_code counter vector labeled by

* host,
* method,
* code.

It makes it possible to see HTTP errors, backoffs etc.

The Metrics struct is designed for extensibility.
Adding new metrics is a matter of adding them to the Metrics struct and including them in the response handling.

This feature has been discussed in the forum [1].

[1] https://forum.rclone.org/t/prometheus-metrics/14484
2021-11-17 18:38:12 +03:00
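A sketch of registering such a counter vector with the Prometheus Go client; the metric name mirrors the commit message, the rest of the wiring is illustrative:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    var httpStatusCodes = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "rclone_http_status_code",
            Help: "HTTP responses seen, labelled by host, method and status code.",
        },
        []string{"host", "method", "code"},
    )

    func main() {
        prometheus.MustRegister(httpStatusCodes)

        // Somewhere in the HTTP response handling:
        httpStatusCodes.WithLabelValues("example.com", "GET", "500").Inc()

        http.Handle("/metrics", promhttp.Handler())
        _ = http.ListenAndServe(":9090", nil)
    }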
Nick Craig-Wood
ba6730720d Fix repeated error messages after pkg/errors removal 2021-11-15 17:58:40 +00:00
Nick Craig-Wood
7735b5c694 Add Sinan Tan to contributors 2021-11-15 17:58:40 +00:00
Nick Craig-Wood
d45b3479ee Add Andy Jackson to contributors 2021-11-15 17:58:40 +00:00
Nick Craig-Wood
4c5df0a765 Add Fredric Arklid to contributors 2021-11-15 17:58:40 +00:00
Sinan Tan
8c61a09be2 crypt: add test cases and documentation for base64 and base32768 filename encoding #5801 2021-11-15 17:57:02 +00:00
Max Sum
c217145cae crypt: add base64 and base32768 filename encoding options #5801 2021-11-15 17:57:02 +00:00
thomae
4c93378f0e serve sftp: update docs on --stdio 2021-11-12 10:49:35 +00:00
thomae
f9e54f96c3 docs/sftp: fix typo 2021-11-11 19:20:15 +01:00
Andy Jackson
af0fcd03cb hdfs: add file and directory move/rename support 2021-11-11 16:41:43 +00:00
albertony
00aafc957e sftp: add rclone to list of supported md5sum/sha1sum commands to look for
See #5781
2021-11-11 15:16:45 +01:00
albertony
29abbd2032 hashsum: support creating hash from data received on stdin
See #5781
2021-11-11 15:16:45 +01:00
Fredric Arklid
663b2d9c46 jottacloud: Add support for Tele2 Cloud 2021-11-11 12:32:23 +00:00
Nick Craig-Wood
f36d6d01b5 rc: fix operations/publiclink default for expires parameter
Before this change the expires parameter was defaulting to 0 if not
provided.

This change makes it default to fs.DurationOff which is the same as
the `rclone link` command.

See: https://forum.rclone.org/t/operations-publiclink-from-dropbox-error-not-autorized/27374
2021-11-11 11:14:22 +00:00
Nick Craig-Wood
0c03aa3a8b dropbox: speed up directory listings by specifying 1000 items in a chunk 2021-11-11 11:14:22 +00:00
Nick Craig-Wood
caa2b8bf40 dropbox: save an API request when at the root
Before this change, rclone always emitted an API request to discover
what type of thing the root is.

This is unnecessary as it is always a directory.
2021-11-11 11:14:22 +00:00
Nick Craig-Wood
421e840e37 Add Borna Butkovic to contributors 2021-11-11 11:14:22 +00:00
Nick Craig-Wood
9b57d27be4 Add David to contributors 2021-11-11 11:14:22 +00:00
Borna Butkovic
627ac1b2d9 ftp: add --ftp-ask-password to prompt for password when needed 2021-11-10 17:34:14 +00:00
David
ae395d8cf0 rc: error on web GUI update won't be fatal - fixes #5385 2021-11-10 17:05:13 +00:00
Ankur Gupta
f04520a6e3 operations: fix goroutine leak in case of copy retry
Whenever transfer.Account() is called, a new goroutine acc.averageLoop()
is started. This goroutine exits only when the channel acc.exit is closed.
acc.exit is closed when acc.Done() is called, which happens during tr.Done().

However, if tr.Reset is called during a copy low level retry, it replaces
the tr.acc, without calling acc.Done(), which results in the goroutine
mentioned above never exiting.

This commit calls acc.Done() during tr.Reset().
2021-11-10 16:44:29 +00:00
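The leak pattern in general terms (an illustrative sketch, not rclone's accounting code): a goroutine that only exits when an exit channel is closed lives forever if the owning object is replaced without its Done method being called.

    package main

    import (
        "fmt"
        "runtime"
        "time"
    )

    type account struct{ exit chan struct{} }

    func newAccount() *account {
        a := &account{exit: make(chan struct{})}
        go a.averageLoop()
        return a
    }

    func (a *account) averageLoop() {
        ticker := time.NewTicker(10 * time.Millisecond)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                // update a rolling average here
            case <-a.exit:
                return // the only exit path
            }
        }
    }

    // Done must be called before dropping the account, otherwise
    // averageLoop never returns - the leak described above.
    func (a *account) Done() { close(a.exit) }

    func main() {
        a := newAccount()
        a.Done() // replacing `a` without calling Done would leak its goroutine
        time.Sleep(50 * time.Millisecond)
        fmt.Println("goroutines:", runtime.NumGoroutine())
    }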
Nick Craig-Wood
c968c3e41c build: raise minimum go version to go1.15
This was necessary because go1.14 seems to have a modules related bug
which means it tries to build modules even though the uses of them are
all disabled with build constraints. This seems to be fixed in go1.15.
2021-11-10 16:11:12 +00:00
Nick Craig-Wood
3661791e82 serve restic: disable for go1.16 and earlier after update 2021-11-10 15:42:50 +00:00
Nick Craig-Wood
4198763c35 build: update all dependencies 2021-11-10 10:34:56 +00:00
Nick Craig-Wood
3de47b8ed4 build: upgrade go.mod file to go1.17 2021-11-10 10:34:56 +00:00
Nick Craig-Wood
71b8e1e80b build: more docs on upgrading dependencies 2021-11-10 10:34:56 +00:00
Nick Craig-Wood
7366e97dfc mega: fix error handling broken by removal of github.com/pkg/errors
There were instances of errors.Wrap being called with a nil error
which the conversion didn't deal with correctly.
2021-11-09 13:43:45 +00:00
Nick Craig-Wood
21ba4d9a18 onedrive: fix error handling broken by removal of github.com/pkg/errors
There were instances of errors.Wrap being called with a nil error
which the conversion didn't deal with correctly.
2021-11-09 13:43:45 +00:00
Nick Craig-Wood
96e099d8e7 union: fix error handling broken by removal of github.com/pkg/errors
There were instances of errors.Wrap being called with a nil error
which the conversion didn't deal with correctly.
2021-11-09 13:43:45 +00:00
Nick Craig-Wood
2a31b5bdd6 Add bbabich to contributors 2021-11-09 13:43:45 +00:00
Nick Craig-Wood
9bdfe4c36f Add Vitor Arruda to contributors 2021-11-09 13:43:45 +00:00
Nick Craig-Wood
e3a2f539fe Add Chris Lu to contributors 2021-11-09 13:43:45 +00:00
Nick Craig-Wood
ffa943e31f Add Carlo Mion to contributors 2021-11-09 13:43:45 +00:00
bbabich
b16f603c51 s3: Add RackCorp object storage to providers 2021-11-09 11:46:58 +00:00
database64128
a7a8372976 🧪 fstest: fix time tests on Windows and add convenience methods to check local and remote fs with precision
Previously only the fs being checked on gets passed to
GetModifyWindow(). However, in most tests, the test files are
generated in the local fs and transferred to the remote fs. So the
local fs time precision has to be taken into account.

This meant that on Windows the time tests failed because the
local fs has a time precision of 100ns. Checking remote items uploaded
from local fs on Windows also requires a modify window of 100ns.
2021-11-09 11:43:36 +00:00
Vitor Arruda
9beb0677e4 backend: Fix union eplus policy returned nil 2021-11-08 11:55:27 +00:00
Nick Craig-Wood
e43b5ce5e5 Remove github.com/pkg/errors and replace with std library version
This is possible now that we no longer support go1.12 and brings
rclone into line with standard practices in the Go world.

This also removes errors.New and errors.Errorf from lib/errors and
prefers the stdlib errors package over lib/errors.
2021-11-07 11:53:30 +00:00
Chris Lu
97328e5755 Improve description for SeaweedFS 2021-11-06 21:01:50 +03:00
Carlo Mion
7b7d780fff stats: fix missing StatsInfo fields in the computation of the group sum 2021-11-05 15:33:00 +00:00
Carlo Mion
c2600f9e4d stats: fix missing computation of transferQueueSize when summing up statistics group - fixes #5749 2021-11-05 15:33:00 +00:00
Ivan Andreev
7bd853ce35 Add Roberto Ricci to contributors 2021-11-05 18:29:47 +03:00
Roberto Ricci
05150cfb1d backend/ftp: increase testUploadTimeout.maxTime to 10 seconds
On slow machines (e.g. Github CI), especially if GOARCH=386,
the test for cmd/serve/ftp could fail if this value is too small.

Fixes #5783
2021-11-05 18:27:44 +03:00
albertony
25366268fe Add Atílio Antônio to contributors 2021-11-04 12:55:49 +01:00
Atílio Antônio
c08d48a50d docs: improve grammar and fix typos (#5361)
This alters some comments in source files, but is mainly concerned with documentation files and help messages.
2021-11-04 12:50:43 +01:00
Nick Craig-Wood
454574e2cc s3: collect the provider quirks into a single function and update
This removes the checks against the provider throughout the code and
puts them into a single setQuirks function for easy maintenance when
adding a new provider.

It also updates the quirks with the results of testing against
backends we have access to.

This also adds a list_url_encode parameter so that quirk can be
manually set.
2021-11-03 21:44:09 +00:00
Nick Craig-Wood
9218a3eb00 fs: add a tristate true/false/unset configuration value 2021-11-03 21:44:09 +00:00
Nick Craig-Wood
1e4ef4b4d5 Add Felix Bünemann to contributors 2021-11-03 21:44:09 +00:00
Nick Craig-Wood
8d92f7d697 s3: fallback to ListObject v1 on unsupported providers
This implements a quirks system for providers and notes which
providers we have tested to support ListObjectsV2.

For those providers which don't support ListObjectsV2 we use the
original ListObjects call.
2021-11-03 19:13:50 +00:00
Felix Bünemann
fd56abc5f2 s3: Use ListObjectsV2 for faster listings
Using ListObjectsV2 with a continuation token is about 5-6x faster than
ListObjectsV2 with a marker.
2021-11-03 19:13:50 +00:00
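A hedged sketch of continuation-token paging with the AWS SDK for Go v1; the bucket name and error handling are placeholders:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))

        var token *string
        for {
            out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
                Bucket:            aws.String("example-bucket"),
                ContinuationToken: token,
            })
            if err != nil {
                fmt.Println("list failed:", err)
                return
            }
            for _, obj := range out.Contents {
                fmt.Println(aws.StringValue(obj.Key))
            }
            if !aws.BoolValue(out.IsTruncated) {
                break
            }
            // Resume the listing exactly where the previous page stopped.
            token = out.NextContinuationToken
        }
    }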
Ivan Andreev
b323bf34e2 sync/test: skip test ConcurrentTruncate on uptobox (take 2)
The test is not applicable to uptobox, which can't upload empty files.
The test was not being skipped as intended because the error was compared directly.
This fix compares the error Cause instead, because Sync wraps the error.
2021-11-02 19:24:23 +03:00
Ivan Andreev
e78e73eae7 lib/encoder: fix benchmarks
At some point in the past the Slash encode option was added to the Onedrive
encoder, so it began to encode slashes in file names rather than treating
them as path separators.
This patch adapts the benchmark test cases accordingly.

Fixes #5659
2021-11-02 19:23:16 +03:00
Nick Craig-Wood
f51a5eca2e fstests: add encoding test for URL encoded path name #5768
Add an encoding test to make sure backends can deal with a URL encoded
path name. This is a fairly common failing in backends and has been an
intermittent problem with onedrive itself.
2021-11-02 15:59:36 +00:00
albertony
39e2af7974 config: allow dot in remote names (#5606) 2021-11-01 20:50:06 +01:00
Ivan Andreev
b3217adf08 Add Chris Nelson to contributors 2021-11-01 21:24:06 +03:00
Ivan Andreev
074234119a bisync: documentation #5164
Co-authored-by: Chris Nelson <stuff@cjnaz.com>
2021-11-01 21:00:27 +03:00
Ivan Andreev
6210e22ab5 bisync: implementation #5164
Fixes #118

Co-authored-by: Chris Nelson <stuff@cjnaz.com>
2021-11-01 21:00:27 +03:00
Ivan Andreev
940e99a929 bisync: test scenarios #5164
Co-authored-by: Chris Nelson <stuff@cjnaz.com>
2021-11-01 21:00:27 +03:00
Ivan Andreev
79b6866b57 rc: export NewErrParamInvalid #5164 2021-11-01 21:00:27 +03:00
Ivan Andreev
c142e3edcc filter: export GlobToRegexp #5164 2021-11-01 21:00:27 +03:00
Nick Craig-Wood
5c646dff9a Start v1.58.0-DEV development 2021-11-01 16:54:17 +00:00
Nick Craig-Wood
19dfaf7440 docs: fix shortcode rendering on download page 2021-11-01 16:50:52 +00:00
Nick Craig-Wood
169990e270 Version v1.57.0 2021-11-01 15:45:40 +00:00
Nick Craig-Wood
e781bcbba1 Add David Liu to contributors 2021-11-01 15:45:40 +00:00
Nick Craig-Wood
409916b0c5 Add trevyn to contributors 2021-11-01 15:45:40 +00:00
albertony
d9c5be32e7 Add thomae to contributors 2021-11-01 15:10:14 +01:00
thomae
57074be9b3 serve sftp: fix typo 2021-11-01 15:07:13 +01:00
Joda Stößer
bae5c7c81b docs: remove application type "other" from drive.md
The application type "other" is not an option anymore.
2021-11-01 13:15:31 +00:00
albertony
05321f4aef docs/sftp: more detailed explanation of pubkey file and certificate 2021-11-01 13:14:17 +00:00
albertony
c9d7248d85 docs/jottacloud: update description of whitelabel services 2021-11-01 12:57:51 +00:00
albertony
da8f9be84b docs: describe the --human-readable option in more detail 2021-11-01 12:55:52 +00:00
David Liu
b806166147 docs: swift: Update OCI url
Oracle Cloud Storage has now also been rebranded as OCI bucket, with a new entry point.
2021-11-01 12:54:23 +00:00
acsfer
20f936c9d4 Add note about S3 compatible services 2021-11-01 12:47:18 +00:00
albertony
91cdaffcc1 docs: add faq section explaining why rclone changes fullwidth characters in file names 2021-11-01 12:46:23 +00:00
trevyn
33bf9b4923 Add mention of Rust bindings for librclone 2021-11-01 12:43:31 +00:00
albertony
b4944f4520 docs/librclone: document that strings are utf8 encoded 2021-11-01 12:39:00 +00:00
albertony
286b152e7b librclone: free strings in python example 2021-11-01 12:36:52 +00:00
Nick Craig-Wood
f7764a0c9d premiumizeme: fix server side directory move after API changes
Apparently moving a directory using the id "0" as the root no longer
works, so this reads the real root ID when it is listed and uses that.

This fixes the DirMove problem.

See: https://forum.rclone.org/t/premiumize-cant-move-files/27169
See: #5734
2021-10-31 19:18:55 +00:00
Nick Craig-Wood
07fcba888c dircache: add SetRootIDAlias to update RootID from FindLeaf 2021-10-31 19:18:55 +00:00
Nick Craig-Wood
af705c754c premiumizeme: fix server side move after API change
See: https://forum.rclone.org/t/premiumize-cant-move-files/27169
See: #5734
2021-10-31 19:18:55 +00:00
Nick Craig-Wood
f85e3209b3 premiumizeme: fix directory listing after API changes
The API doesn't seem to accept a value of "0" any more for the root
directory ID, giving the error "Could not decode folder id".

However omitting it seems to work fine.
2021-10-31 19:18:55 +00:00
Nick Craig-Wood
e77dcb7f52 test_all: remove stray debug 2021-10-31 19:18:55 +00:00
Nick Craig-Wood
4ab842198a Revert "premiumizeme: attempt to fix integration tests"
This reverts commit 1eff0eef7a.

Now that the test account is premium again it is very fast and this is
no longer needed.
2021-10-31 19:18:54 +00:00
albertony
a8059b8a90 docs/mount: add note that to execute programs one must set custom filesystem permissions (#5771) 2021-10-31 00:48:56 +02:00
Nick Craig-Wood
cf2c2792e6 s3: fix corrupted on transfer: sizes differ 0 vs xxxx with Ceph
In this commit, released in 1.56.0 we started reading the size of the
object from the Content-Length header as returned by the GET request
to read the object.

4401d180aa s3: add --s3-no-head-object

However some object storage systems, notably Ceph, don't return a
Content-Length header.

The new code correctly calls the setMetaData function with a nil
pointer to the ContentLength.

However due to this commit from 2014, released in v1.18, the
setMetaData function was not ignoring the size as it should have done.

0da6f24221  s3: use official github.com/aws/aws-sdk-go including multipart upload #101

This commit correctly ignores the content length if not set.

Fixes #5732
2021-10-30 12:01:09 +01:00
Nick Craig-Wood
e6e1c49b58 s3: fix shared_credentials_file auth after reverting incorrect fix #5762
Before this change the `shared_credentials_file` config option was
being ignored.

The correct value is passed into the SDK but it only sets the
credentials in the default provider. Unfortunately we wipe the default
provider in order to install our own chain if env_auth is true.

This patch restores the shared credentials file in the session
options, exactly the same as how we restore the profile.

Original fix:

1605f9e14d s3: Fix shared_credentials_file auth
2021-10-30 11:54:17 +01:00
Nick Craig-Wood
712f9c9760 s3: fix IAM Role for Service Account not working and other auth problems
This patch reverts this commit

1605f9e14d s3: Fix shared_credentials_file auth

It unfortunately had the side effect of making the s3 SDK ignore the
config in our custom chain and use the default provider. This meant
that advanced auth, such as --s3-profile with role_arn, was being
ignored.

Fixes #5468
Fixes #5762
2021-10-30 11:54:17 +01:00
albertony
a238877ad8 docs: note that destination is always a directory (#5766) 2021-10-30 00:30:00 +02:00
Ivan Andreev
70297c3aed sync/test: TestConcurrentTruncate needs empty files - skip on uptobox 2021-10-28 17:04:56 +03:00
Nolan Woods
a074a2b983 lib/http: Fix handling of ssl credentials
Adds a test that makes an actual http and https request against the server
2021-10-27 14:46:10 +03:00
Nick Craig-Wood
00ceeef21c hdfs: wait longer for the server to start up in the integration tests #5734
This needs fixing properly so rclone knows when the server has
actually started.
2021-10-23 22:53:17 +01:00
Nick Craig-Wood
2e81b78486 Add Dmitry Bogatov to contributors 2021-10-23 22:53:17 +01:00
Dmitry Bogatov
bb11803f1f Create direct share link for "koofr" backend
Instead of creating a link to the web interface, create a direct link
usable by curl(1) or wget(1).
2021-10-23 15:00:33 +03:00
Nick Craig-Wood
a542ddf60a hdfs: attempt to make integration tests more reliable #5734
This makes sure the namenode is accepting TCP connections before
starting the integration tests in an attempt to make them more
reliable.
2021-10-22 13:07:48 +01:00
Nick Craig-Wood
257f5d279a filefabric: fix directory move after API change #5734
The API has changed in the directory move call JSON response from
returning a TaskID as a string to returning it as an integer. In other
places it is still returned as a string though.

This patch allows the TaskID to be an integer or a string in the JSON
response and keeps it internally as a string like before.
2021-10-22 12:58:00 +01:00
albertony
4f05ece39e test: fix touchdir test on backends without modtime support 2021-10-22 13:37:34 +02:00
albertony
9c8c0a58b5 touch: fix recursive touch due to recently introduced error ErrorIsDir 2021-10-22 13:37:34 +02:00
albertony
a70c20fe6b touch: improve error message from recursive touch 2021-10-22 13:37:34 +02:00
Ivan Andreev
59e77f794e serve/docker: skip race test until we find a solution for deadlock
Related to #5738
2021-10-22 14:00:48 +03:00
Ivan Andreev
1a66736ef0 Add Thomas Stachl to contributors 2021-10-21 15:23:48 +03:00
Ivan Andreev
844025d053 ftp: add support for precise time #5655 2021-10-21 14:50:53 +03:00
albertony
3a03f2778c test: ignore integration test TestCopyFileMaxTransfer on Google Drive
The test fails because it expects that a copy with MaxTransfer and CutoffModeHard should
return a fatal error, because this is thrown from accounting (ErrorMaxTransferLimitReachedFatal),
but in the case of Google Drive the external Google API catches it and replaces it with a
non-fatal error:

pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))

(7290f25351/internal/gensupport/media.go (L140))
2021-10-21 12:42:25 +01:00
Ivan Andreev
29c6c86c00 ftp: fix timeout after long uploads #5596 2021-10-21 14:18:23 +03:00
Ivan Andreev
a32fde09ca fs/http: declutter code #5596 2021-10-21 14:18:23 +03:00
Ivan Andreev
1d50336615 ftp: replace jlaffaye/ftp by rclone/ftp in the build #5596 2021-10-21 14:18:23 +03:00
Thomas Stachl
015b250905 serve/docker: build docker plugin for multiple platforms #5668
Fixes #5462

Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
2021-10-21 13:01:23 +03:00
Nick Craig-Wood
4b1ada2d51 filefabric: allow integration tests double time as they keep timing out #5734 2021-10-21 09:54:29 +01:00
albertony
f589dbc077 docs: don't show fictional example values for options as multiple choice items
See #5538
2021-10-20 22:56:19 +02:00
albertony
8efead1ee6 docs: update guide for contributing documentation
See #5538
2021-10-20 22:56:19 +02:00
albertony
9a17b32b5d docs: automatically remove ending punctuation from first line of backend option help string when used for flag usage
See #5538
2021-10-20 22:56:19 +02:00
albertony
8b65c55711 cmd/config: improve option prompt
See #5538
2021-10-20 22:56:19 +02:00
albertony
e2f47ecdeb docs: punctuation cleanup
See #5538
2021-10-20 22:56:19 +02:00
albertony
b868561951 jottacloud: return direct download link from rclone link command
If the shared path is a directory, then the download will be a zip archive.

Fixes #5391

See #5399
2021-10-20 19:54:29 +02:00
albertony
78db3dba0e jottacloud: add support for UserInfo feature
See #5399
2021-10-20 19:54:29 +02:00
albertony
41876dd669 touch: add support for touching files in directory, with options for recursive, filtering and dry-run/interactive
Fixes #5301
2021-10-20 19:24:57 +02:00
Nick Craig-Wood
2e72ec96c1 qingstor: ignore harmless failing integration test #5734
The test TestIntegration/FsMkdir/FsPutFiles/FromRoot/ListR fails in
the integration test because there is a broken bucket in the test
account which support haven't been able to remove.
2021-10-20 17:51:06 +01:00
Nick Craig-Wood
9742648fce fichier: allow more list retries for the integration tests #5734 2021-10-20 17:45:54 +01:00
Nick Craig-Wood
d73264572b putio: allow integration tests double time as they keep timing out #5734 2021-10-20 17:38:30 +01:00
Nick Craig-Wood
ff801e8e17 test_all: allow configuring a multiplier for the timeout #5734 2021-10-20 17:38:30 +01:00
Nick Craig-Wood
72c013c6f4 vfs: increase time to wait for writers in tests to 30s
In some backends (eg putio) this deadline was consistently missed at
10s so this patch increases it to 30s.

See: #5734
2021-10-20 17:38:30 +01:00
Nick Craig-Wood
1eff0eef7a premiumizeme: attempt to fix integration tests
This tries to fix the integration tests by only allowing one
premiumizeme test to run at once, in the hope it will stop rclone
hitting the rate limits and breaking the tests.

See: #5734
2021-10-20 17:38:30 +01:00
Nick Craig-Wood
5a5318720a onedrive: stop public link test complaining on non-business account
The TestIntegration/FsMkdir/FsPutFiles/PublicLink test doesn't work on
a standard onedrive account, it returns

    accessDenied: accountUpgradeRequired: Account Upgrade is required for this operation.

See: #5734
2021-10-20 17:38:30 +01:00
Nick Craig-Wood
e253b44882 dropbox: stop public link test complaining on non-enterprise account
The TestIntegration/FsMkdir/FsPutFiles/PublicLink test doesn't work on
a standard dropbox account, only on an enterprise account because it
sets expiry dates.

See: #5734
2021-10-20 17:38:30 +01:00
Ivan Andreev
0d7426a2dd hasher: backend documentation #5587 2021-10-20 19:11:54 +03:00
Ivan Andreev
f102ef2161 hasher: add hasher backend #5587 2021-10-20 19:11:54 +03:00
Ivan Andreev
57c7fde864 lib/kv: add unit tests, fix races #5587
After testing concurrent calls of `kv.Start` and `db.Stop` I had to protect
more parts of these with a mutex to make the results deterministic without Sleeps
in the test body. It's safer but has the potential to lock Start for up to
2 seconds due to `db.open`.
2021-10-20 19:11:54 +03:00
Ivan Andreev
50df8cec9c lib/kv: add key-value database api #5587
Add bolt-based key-value database support.

Quick API description:
https://github.com/rclone/rclone/pull/5587#issuecomment-942174768
2021-10-20 19:11:54 +03:00
Nick Craig-Wood
8cd3251b57 fstests: Relax MimeType support checking #5587
Before this change we checked that features.ReadMimeType was set if
and only if the Object.MimeType method was implemented.

However this test is overly general - we don't care if Objects
advertise MimeType when features.ReadMimeType is set provided that
they always return an empty string (which is what a wrapping backend
might do).

This patch implements that logic.
2021-10-20 19:11:54 +03:00
Nick Craig-Wood
cc2f6f722c filefabric: make backoff exponential for error_background to fix errors
Before this change the backoff for the error_background error was 6
seconds. This means that if it wasn't resolved in 60 seconds (with the
default 10 low level retries) then an error was reported.

This error was being reported frequently in the integration tests, so
is likely affecting real users too.

This patch changes the backoff into an exponential backoff
1,2,4,8...1024 seconds to make sure we wait long enough for the
background operation to complete.

See #5734
2021-10-20 15:41:09 +01:00
Ivan Andreev
6cda4c2c3c Add Ian Levesque to contributors 2021-10-19 23:02:51 +03:00
Ivan Andreev
023b666863 Add Filip Rysavy to contributors 2021-10-19 23:01:47 +03:00
Ivan Andreev
2a4c6ad0e7 Add Matthew Sevey to contributors 2021-10-19 23:00:16 +03:00
Ivan Andreev
6d02530f9d sia: finish documentation #4514
Also rename stuttering `--sia-sia-user-agent` to `--sia-user-agent`
2021-10-19 22:55:27 +03:00
Ivan Andreev
c5bc857f9b sia: fix and enable integration tests #4514
- setup correct path encoding (fixes backend test FsEncoding)
- ignore range option if file is empty (fixes VFS test TestFileReadAtZeroLength)
- cleanup stray files left after failed upload (fixes test FsPutError)
- rebase code on master, adapt backend for rclone context passing
- translate Siad errors to rclone native FS errors in sia errorHandler
- TestSia: return proper backend options from the script
- TestSia: use uptodate AntFarm image, nebulouslabs/siaantfarm is stale
2021-10-19 22:55:27 +03:00
Matthew Sevey
0d1e017e09 sia: setup docker with sia-antfarm for test #4514
Always pull the latest Sia Antfarm docker image
Add wait for Sia renter to become upload ready

Co-authored-by: Filip Rysavy <fil@siasky.net>
2021-10-19 22:55:27 +03:00
Ian Levesque
3351b1e6ae sia: add backend for sia decentralized cloud #4514 2021-10-19 22:55:27 +03:00
Fred
b085aa1a3f seafile: fix error when not configured for 2fa (#5665) 2021-10-19 20:53:35 +01:00
Nick Craig-Wood
eb0c8284f1 azureblob: fix incorrect size after --azureblob-no-head-object patch
In

05f128868f azureblob: add --azureblob-no-head-object

we incorrectly parsed the size of the object as the Content-Length of
the returned header. This is incorrect in the presence of Range
requests.

This fixes the problem by parsing the Content-Range header if
available to read the correct length if a Range request was
issued.

See: #5734
2021-10-19 20:12:17 +01:00
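A small sketch of the idea behind the fix above: when a Range request was made, the total object size comes from the Content-Range header (`bytes first-last/total`) rather than from Content-Length (illustrative only, not the backend code):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // sizeFromHeaders returns the full object size, preferring the total
    // from a Content-Range header (bytes <first>-<last>/<total>) and
    // falling back to the Content-Length when no range was requested.
    func sizeFromHeaders(contentRange string, contentLength int64) (int64, error) {
        if contentRange == "" {
            return contentLength, nil
        }
        slash := strings.LastIndex(contentRange, "/")
        if slash < 0 {
            return 0, fmt.Errorf("malformed Content-Range %q", contentRange)
        }
        return strconv.ParseInt(contentRange[slash+1:], 10, 64)
    }

    func main() {
        size, err := sizeFromHeaders("bytes 0-1023/4096", 1024)
        fmt.Println(size, err) // 4096 <nil>
    }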
Nick Craig-Wood
f5c7c597ba s3: Use a combination of SDK retries and rclone retries - fixes #5509
This reverts commit

dc06973796 Revert "s3: use rclone's low level retries instead of AWS SDK to fix listing retries"

Which in turn reverted

5470d34740 "backend/s3: use low-level-retries as the number of SDK retries"

So we are back where we started.

It then modifies it to set the AWS SDK to `--low-level-retries`
retries, but set the rclone retries to 2 so that directory listings
can be retried.
2021-10-19 20:12:17 +01:00
Nick Craig-Wood
3cef84aabe Add r0kk3rz to contributors 2021-10-19 20:12:17 +01:00
Nick Craig-Wood
93afd5c346 Add Rajat Goel to contributors 2021-10-19 20:12:17 +01:00
Alex Chen
1c3c8babd3 docs: mention make for building and cmount tag for macos (#5487) 2021-10-19 12:18:06 +08:00
Ivan Andreev
690a7ac783 chunker: fix md5all test for no-meta test remotes 2021-10-18 18:04:07 +03:00
Ivan Andreev
bbcc9a45fe serve/docker: allow to customize proxy settings of docker plugin 2021-10-18 18:03:06 +03:00
albertony
16949fde09 Do not override mime types from os defaults
https://forum.rclone.org/t/rclone-serve-http-save-as/26672
2021-10-18 13:28:22 +01:00
r0kk3rz
8e4b87ae03 s3: Add AWS Snowball Edge to providers examples - fixes #5720 2021-10-18 12:52:59 +01:00
Rajat Goel
db6002952e dropbox: upgrade sdk version 2021-10-16 10:55:02 +01:00
Nick Craig-Wood
96e14bf456 sftp: fix initialization bug introduced by fs.ErrorIsDir return
3fbaa4c0b0 backends: make NewObject return fs.ErrorIsDir if possible
2021-10-16 10:38:24 +01:00
Nick Craig-Wood
54da6154c4 operations: fix lsjson --stat on root directories of bucket based backends 2021-10-16 10:38:24 +01:00
Ivan Andreev
f50537b64b ftp: add option to disable tls13 2021-10-15 20:24:37 +03:00
Ivan Andreev
f37b25a2df ftp: enable tls session cache by default 2021-10-15 19:54:25 +03:00
albertony
29b8c71522 build: force utf8 when updating backend docs from python script (#5721) 2021-10-15 18:51:57 +02:00
Nick Craig-Wood
7b66ca132d build: increase timeout for golangci-lint to 10 minutes 2021-10-15 15:58:52 +01:00
Nick Craig-Wood
9ce0df3242 dropbox: add --dropbox-batch-commit-timeout to control batch timeout
This also adds an Debug message log showing how long each batch took

See: #5491
2021-10-15 15:32:40 +01:00
Nick Craig-Wood
f4c5f1f185 box: retry operation_blocked_temporary errors #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
825f7826f5 box: add --box-owned-by to only show items owned by the login passed #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
34140b2f57 box: delete items in parallel in cleanup using --checkers threads #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
e18ae5da09 box: factor directory listing and cleanup listing into one function #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
b61912b4c8 box: add --box-list-chunk to control listing chunk size #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
bfecf5301b box: when doing cleanup delete as much as possible - fixes #5545
Before this change the cleanup routine exited on the first deletion
error.

This change counts any errors on deletion and exits when the iteration
is complete with an error showing the number of deletion failures.
Deletion failures will be logged.
2021-10-15 15:28:54 +01:00
Nick Craig-Wood
308323e9c4 box: make listings of heavily used directories more reliable #5545
Before this change we used limit/offset paging for directories in the
main directory listing routine and in the trash cleanup listing.

This switches to the new scheme of limit/marker which is more reliable
on a directory which is continuously changing. It has the disadvantage
that it doesn't tell us the total number of items available, however
that wasn't information rclone uses.
2021-10-15 15:28:54 +01:00
Nick Craig-Wood
fc5d6c16b6 serve ftp: ensure modtime is passed as UTC always to fix timezone oddities
See: https://forum.rclone.org/t/ftp-server-reports-file-timestamps-in-utc/26274
2021-10-15 15:25:51 +01:00
Nick Craig-Wood
c821fbeddf drive: add -o config option to backend drives to config for all shared drives
See: https://forum.rclone.org/t/bulk-create-remotes-to-existing-google-shared-drives/26837/
2021-10-15 15:22:14 +01:00
Nick Craig-Wood
93d85015af sftp: fix timeout when doing MD5SUM of large file
Before this change we were timing out MD5SUMs after 1 minute because
rclone was closing the SSH session when there were sessions still
active.

This change counts sessions active for all SSH sessions now (Upload,
Download, Hashes and running commands).

See: https://forum.rclone.org/t/while-rclone-copying-large-files-md5sum-failed-with-exit-status/26845/
2021-10-15 15:19:22 +01:00
Nick Craig-Wood
a98e3ea6f1 build: replace the deprecated golint linter with revive
This fixes up a small number of new lint items also
2021-10-15 12:51:31 +01:00
Nick Craig-Wood
167406bc68 build: switch to using the golangci-lint action for better error reporting
The action reports errors to users in their pull requests which is
much easier to understand.
2021-10-15 12:50:22 +01:00
Nick Craig-Wood
036abde393 build: fix indentation in build.yml 2021-10-15 12:50:22 +01:00
Nick Craig-Wood
edf8978d15 operations: fix HashSum tests after removing ERROR and UNSUPPORTED
This was caused by

7a1cab57b6 cmd/hashsum: dont put ERROR or UNSUPPORTED in output

And was picked up in the integration tests.

This patch no longer calls the HashLister for unsupported hash types.
2021-10-15 10:51:08 +01:00
Nick Craig-Wood
f529c02446 lsjson: add --stat flag and operations/stat api
This enables information about single files to be efficiently
retrieved.
2021-10-14 17:15:50 +01:00
Nick Craig-Wood
3fbaa4c0b0 backends: make NewObject return fs.ErrorIsDir if possible
This changes the interface to NewObject so that if NewObject is called
on a directory then it should return fs.ErrorIsDir if possible without
doing any extra work, otherwise fs.ErrorObjectNotFound.

Tested on integration test server with:

go run integration-test.go -tests backend -run TestIntegration/FsMkdir/FsPutFiles/FsNewObjectDir -branch fix-stat -maxtries 1
2021-10-14 17:15:50 +01:00
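
A minimal sketch of the contract, using the real fs.ErrorIsDir / fs.ErrorObjectNotFound sentinels from the rclone fs package but a hypothetical lookup helper instead of a real backend:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/rclone/rclone/fs"
)

// entry is a hypothetical result of a backend metadata lookup.
type entry struct {
	name  string
	isDir bool
}

// lookup is a hypothetical stand-in for the backend's metadata call.
func lookup(ctx context.Context, remote string) (entry, error) {
	if remote == "dir" {
		return entry{name: remote, isDir: true}, nil
	}
	return entry{}, errors.New("not found")
}

// newObject follows the contract: fs.ErrorIsDir if the path is a
// directory, fs.ErrorObjectNotFound if nothing exists there.
func newObject(ctx context.Context, remote string) (string, error) {
	e, err := lookup(ctx, remote)
	if err != nil {
		return "", fs.ErrorObjectNotFound
	}
	if e.isDir {
		return "", fs.ErrorIsDir
	}
	return e.name, nil
}

func main() {
	_, err := newObject(context.Background(), "dir")
	fmt.Println(err) // the "is a directory" sentinel, not a crash or not-found
}
```
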
Nick Craig-Wood
af732c5431 Add Logeshwaran to contributors 2021-10-14 17:15:48 +01:00
Nick Craig-Wood
14de0cfb43 Add Joda Stößer to contributors 2021-10-14 17:14:53 +01:00
albertony
c2597a4fa3 docs: cleanup header levels in backend docs (#5698) 2021-10-14 15:40:18 +02:00
Logeshwaran
ceaafe6620 s3: add support to use CDN URL to download the file
The egress charges while using a CloudFront CDN URL are cheaper
compared to accessing the file directly from S3. So this adds a download
URL advanced option which, when set, downloads the file using it.
2021-10-14 11:19:38 +01:00
Joda Stößer
d41b9b46d4 docs: improve ordered list prefix for drive.md 2021-10-14 11:08:15 +01:00
Joda Stößer
98d9ba363f .github: correct contribution link in pull request template 2021-10-14 11:07:25 +01:00
Ivan Andreev
16fb608bee hashsum: treat hash values in sum file as case insensitive
Also warn about duplicate file paths in sum files.

Fixes https://forum.rclone.org/t/rclone-check-sum/25566/45
2021-10-13 18:21:34 +03:00
Ivan Andreev
cf9b82b8db chunker: md5all must create metadata if base hash is slow
Before this patch the md5all option would skip creating metadata with
hashsum if the base filesystem provided md5, in the hope of passing it
through. However, if the base hash is slow (for example on the local fs),
chunker passed the slow md5 but never reported this fact in features.

This patch makes chunker snapshot the base hashsum in metadata when md5all
is set and the base hashsum is slow, since chunker was intended to provide
only instant hashsums from the start.

Fixes #5508
2021-10-13 16:18:08 +03:00
albertony
7d66bfbb7c docs: toc styling (#5695) 2021-10-13 15:04:11 +02:00
Nolan Woods
023e32de05 lib/http: Factor password hash salt into options with default 2021-10-13 11:33:38 +01:00
Nolan Woods
b1cb41f8da lib/http: Fix bad username check in single auth secret provider 2021-10-13 11:33:38 +01:00
Nick Craig-Wood
1cb31e8cc7 crypt: fix uploads with --crypt-no-data-encryption
Before this change, when uploading to a crypt, the ObjectInfo
accidentally used the encrypted size, not the unencrypted size when
--crypt-no-data-encryption was set.

Fixes #5498
2021-10-12 17:12:41 +01:00
Ivan Andreev
1e7db7193e docs: note minimum supported docker engine 2021-10-12 13:27:20 +03:00
Ivan Andreev
7190c058a7 crypt: return wrapped object even with no-data-encryption
In the presence of no_data_encryption, Crypt's Put method used to over-optimize
and return the base object. This patch makes it return a Crypt-wrapped object.

Fixes #5498
2021-10-12 00:41:12 +03:00
albertony
85074f8f88 librclone: add RcloneFreeString function
See PR #5703

Based on initial work by Weng Haoyu (@wengh) in PR #5362
2021-10-11 19:10:07 +02:00
albertony
c7329d2ece docs: add section in install documentation about portable install
See #5591
2021-10-11 15:08:35 +02:00
albertony
f3e71f129c config: convert --cache-dir value to an absolute path 2021-10-11 15:08:35 +02:00
albertony
0ffdca42d5 docs: document --cache-dir flag 2021-10-11 15:08:35 +02:00
albertony
dbb6f94d95 config: make temporary directory user-configurable
See #5591
2021-10-11 15:08:35 +02:00
albertony
352f9bcd47 config: add paths command to show configured paths
See #5591
2021-10-11 15:08:35 +02:00
Nick Craig-Wood
d8886b37a6 serve sftp: update docs on host key generation 2021-10-11 10:43:16 +01:00
albertony
894a5a1a83 serve sftp: fix generation of server keys on windows 2021-10-11 10:43:16 +01:00
albertony
ada6a92c8b serve sftp: generate an Ed25519 server key as well as ECDSA and RSA 2021-10-11 10:43:16 +01:00
Nick Craig-Wood
df0b7d8eab serve sftp: generate an ECDSA server key as well as RSA
Before this fix, rclone only generated an RSA server key when the user
didn't supply a key.

However the RSA server key is being deprecated as it is now insecure.

This patch generates an ECDSA server key too which will be used in
preference over the RSA key, but the RSA key will carry on working.

Fixes #5671
2021-10-11 10:43:16 +01:00
Nick Craig-Wood
0dfffc0ed4 Add YenForYang to contributors 2021-10-11 10:43:16 +01:00
Alfonso Montero
19fc1b2a95 docs/compress: minor improvements 2021-10-09 18:22:38 +02:00
Ivan Andreev
bce395385d mount/docs: improve wording 2021-10-09 18:53:57 +03:00
albertony
a5b8fcc127 docs: align dropdown items when icons have different sizes 2021-10-09 18:49:05 +03:00
YenForYang
269f90c1e4 drive: Fix buffering for single request upload for files smaller than --drive-upload-cutoff
I discovered that `rclone` always uploads in chunks of 16MiB whenever
uploading a file smaller than `--drive-upload-cutoff`. This is
undesirable since the purpose of the flag `--drive-upload-cutoff` is
to *prevent* chunking below a certain file size.

I realized that it wasn't `rclone` forcing the 16MiB chunks. The
`google-api-go-client` forces a chunk size default of
[`googleapi.DefaultUploadChunkSize`](32bf29c2e1/googleapi/googleapi.go (L55-L57))
bytes for resumable type uploads. This means that all requests that
use `*drive.Service` directly for upload without specifying a
`googleapi.ChunkSize` will be forced to use a *`resumable`*
`uploadType` (rather than `multipart`) for files less than
`googleapi.DefaultUploadChunkSize`. This is also noted directly in the
Drive API client documentation [here](https://pkg.go.dev/google.golang.org/api/drive/v3@v0.44.0#FilesUpdateCall.Media).

This fixes the problem by passing `googleapi.ChunkSize(0)` to
`Media()` method calls, which is the only way to disable chunking
completely. This is mentioned in the API docs
[here](https://pkg.go.dev/google.golang.org/api/googleapi@v0.44.0#ChunkSize).

The other alternative would be to pass
`googleapi.ChunkSize(f.opt.ChunkSize)` -- however, I'm *strongly* in
favor of *not* doing this for performance reasons. By not explicitly
passing a `googleapi.ChunkSize(0)`, we effectively allow
[`PrepareUpload()`](https://pkg.go.dev/google.golang.org/api/internal/gensupport@v0.44.0#PrepareUpload)
to create a
[`NewMediaBuffer`](https://pkg.go.dev/google.golang.org/api/internal/gensupport@v0.44.0#NewMediaBuffer)
that copies the original `io.Reader` passed to `Media()` in order to
check that its size is less than `ChunkSize`, which will unnecessarily
consume time and memory.

`minChunkSize` is also changed to be `googleapi.MinUploadChunkSize`,
as it is specified by the API and not something we have control over.
2021-10-08 15:29:38 +01:00
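
Illustratively, the fix amounts to passing a zero chunk size to Media(); a sketch against the public Drive v3 client (not rclone's actual backend code), assuming srv is an authenticated *drive.Service:

```go
package driveupload

import (
	"context"
	"io"

	"google.golang.org/api/drive/v3"
	"google.golang.org/api/googleapi"
)

// uploadSmall uploads in a single multipart request: passing
// googleapi.ChunkSize(0) disables the client's resumable chunking, so no
// default-sized buffer is allocated for files below the upload cutoff.
func uploadSmall(ctx context.Context, srv *drive.Service, name string, in io.Reader) (*drive.File, error) {
	return srv.Files.
		Create(&drive.File{Name: name}).
		Media(in, googleapi.ChunkSize(0)).
		Context(ctx).
		Do()
}
```
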
Ivan Andreev
7a1cab57b6 cmd/hashsum: dont put ERROR or UNSUPPORTED in output 2021-10-08 14:26:27 +03:00
Ankur Gupta
c8d5606f2c Removed multiple emails for Ankur Gupta 2021-10-08 10:25:42 +01:00
Ivan Andreev
a2545066e2 drive: constrain list by filter #5023
Google Drive API allows for clauses like "modifiedTime > '2012-06-04T12:00:00'"
in the query param, so the filter flags --max-age and --min-age can be applied
directly at the directory listing phase rather than in a filter.
This is extremely helpful when we want to do an incremental backup of a remote
drive with many files but the number of recently changed files is small.

Co-authored-by: fotile96 <fotile96@users.noreply.github.com>
2021-10-07 22:11:22 +03:00
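
A sketch of what pushing the age filter into the query looks like with the public Drive v3 client; the folder ID, fields and page size here are illustrative, not rclone's actual listing code:

```go
package drivelist

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/api/drive/v3"
)

// listChangedSince lists only files modified after the given time by
// embedding the cutoff in the Drive query itself, so unchanged files are
// never returned or paged through at all.
func listChangedSince(ctx context.Context, srv *drive.Service, folderID string, since time.Time) ([]*drive.File, error) {
	q := fmt.Sprintf("'%s' in parents and modifiedTime > '%s' and trashed = false",
		folderID, since.UTC().Format(time.RFC3339))
	var out []*drive.File
	pageToken := ""
	for {
		call := srv.Files.List().Q(q).
			Fields("nextPageToken, files(id, name, modifiedTime)").
			PageSize(1000)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		r, err := call.Context(ctx).Do()
		if err != nil {
			return nil, err
		}
		out = append(out, r.Files...)
		if r.NextPageToken == "" {
			return out, nil
		}
		pageToken = r.NextPageToken
	}
}
```
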
Ivan Andreev
729704bcb8 serve/docker: fix octal umask 2021-10-07 22:02:27 +03:00
Nick Craig-Wood
8b4a89d34b Update github.com/ncw/swift to v2.0.1 2021-10-07 12:02:09 +01:00
Ivan Andreev
15a9816512 ftp: update encoding in integration tests with ProFtpd, PureFtpd, VsFtpd
PR #5589 established recommended encodings to use with major FTP servers.
This patch updates integration tests correspondingly.
2021-10-05 21:45:08 +03:00
Ivan Andreev
cace18d89a docs/ftp: state clearly that active mode is not supported 2021-10-05 15:52:50 +03:00
Ivan Andreev
a065fb23e5 mount: document the mount helper mode, make command docs - #5594 2021-10-03 01:44:08 +03:00
Ivan Andreev
a95c7a001e core: run rclone as mount helper - #5594 2021-10-03 01:44:08 +03:00
Ivan Andreev
ffa1b1a258 config: enable verbose logging by the --verbose argument - #5594 2021-10-03 01:44:08 +03:00
Ivan Andreev
8b8a943dd8 mountlib: correctly daemonize for compatibility with automount - #5593
This patch will:
- add --daemon-wait flag to control the time to wait for background mount
- remove dependency on sevlyar/go-daemon and implement backgrounding directly
- avoid setsid during backgrounding as it can result in race under Automount
- provide a fallback PATH to correctly run `fusermount` under systemd as it
  runs mount units without standard environment variables
- correctly handle ^C pressed while background process is being setting up
2021-10-02 23:45:16 +03:00
Ivan Andreev
8c10dee510 mountlib: use procfs to validate mount on linux - #5593
The current way of checking whether the mountpoint has already been mounted (directory
list) can result in a race if rclone runs under Automount (classic or systemd).

This patch adopts Linux ProcFS for the check. Note that mountpoint is considered
empty if it's tagged as "mounted" by autofs. Also ProcFS is used to check whether
rclone mount was successful (ie. tagged by a string containing "rclone").

On macOS/BSD where ProcFS is unavailable the old method is still used.

This patch also moves a few utility functions unchanged to utils.go:
CheckOverlap, CheckAllowings, SetVolumeName.
2021-10-02 23:45:16 +03:00
Ivan Andreev
68be24c88d log: optionally print pid in logs - #5593
This option is useful to troubleshoot `rclone mount --daemon`
2021-10-02 23:45:16 +03:00
albertony
fbc7f2e61b lib/file: improve error message when attempting to create dir on nonexistent drive on windows
This replaces built-in os.MkdirAll with a patched version that stops the recursion
when reaching the volume part of the path. The original version would continue recursion,
and for extended length paths end up with \\? as the top-level directory, and the error
message would then be something like:
mkdir \\?: The filename, directory name, or volume label syntax is incorrect.
2021-10-01 23:18:39 +02:00
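
The essence of the patched version is to stop recursing once only the volume name remains; a simplified sketch (not the actual lib/file implementation):

```go
package fileutil

import (
	"os"
	"path/filepath"
)

// mkdirAll creates dir and any missing parents, but stops recursing once
// only the volume name remains (e.g. `C:` or `\\?\C:`), so it never tries
// to create `\\?` as a top-level directory on Windows.
func mkdirAll(dir string, perm os.FileMode) error {
	if fi, err := os.Stat(dir); err == nil {
		if fi.IsDir() {
			return nil
		}
		return &os.PathError{Op: "mkdir", Path: dir, Err: os.ErrExist}
	}
	parent := filepath.Dir(dir)
	// Recurse only while the parent is still longer than the volume name.
	if parent != dir && len(parent) > len(filepath.VolumeName(dir)) {
		if err := mkdirAll(parent, perm); err != nil {
			return err
		}
	}
	return os.Mkdir(dir, perm)
}
```
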
Nolan Woods
b30731c9d0 lib/http: Add auth to http service
Fixes https://github.com/rclone/rclone/issues/5620
2021-10-01 15:51:48 +01:00
albertony
26b6c83e49 docs: extend documentation on valid remote names 2021-10-01 15:18:04 +02:00
albertony
59c74ea1b8 config: support hyphen in remote name from environment variable 2021-10-01 15:18:04 +02:00
Ivan Andreev
2d05b28b0a ftp: enable CI for ProFtpd, PureFtpd, VsFtpd 2021-10-01 10:09:57 +03:00
Ivan Andreev
dc589d3070 ftp: provide valid encoding for ProFtpd, PureFtpd, VsFtpd 2021-10-01 10:09:57 +03:00
Ivan Andreev
48e7246163 lib/encoder: add encoding of square brackets 2021-10-01 10:09:57 +03:00
Ivan Andreev
69f4b48719 ftp: fix deadlock after failed update when concurrency=1 2021-10-01 10:03:59 +03:00
Nick Craig-Wood
bb0c4ad2d8 union: fix rename not working with union of local disk and bucket based remote
Before this change the union's feature flags were a strict AND of the
underlying remotes. This means that a union of a local disk (which can
Move but not Copy) and a bucket based remote (which can Copy but not
Move) could neither Move nor Copy.

This fix advertises Move in the union if all the remotes can Move or
Copy. It also implements Move as Copy+Delete (like rclone does
normally) if the underlying union does not support Move.

This enables renames to work as expected with unions of local disk and
bucket based remotes.

Fixes #5632
2021-09-30 20:09:02 +01:00
albertony
b389b84685 jottacloud: refactor all file state checks into common functions 2021-09-30 19:34:48 +02:00
albertony
b0f06d9920 jottacloud: improved error handling with SetModTime and corrupt files in general 2021-09-30 19:34:48 +02:00
albertony
159229527d jottacloud: implement SetModTime to support modtime-only changes - #5627 2021-09-30 19:34:48 +02:00
albertony
b5a27b1c75 docs: cleanup header levels 2021-09-30 17:54:57 +02:00
albertony
db7db952c1 Add Jonta to contributors 2021-09-30 15:38:24 +02:00
Jonta
d8d621c175 docs: grammar/readability (#5633) 2021-09-30 15:34:00 +02:00
Nick Craig-Wood
0902e5c48e vfs: Ignore ECLOSED in Setattr when truncating file handles
Before this change file handles could get closed while the loop
truncating file handles was running.

This would mean that occasionally an ECLOSED (which is translated into
EBADF by cmd/mount) would spuriously be returned if Release happened
to happen in the middle of a Truncate call (Setattr called with
size=0).

This change ignores the ECLOSED while truncating file handles.

See: https://forum.rclone.org/t/writes-to-wasabi-mount-failing-with-bad-file-descriptor-intermittently/26321
2021-09-28 11:51:41 +01:00
Nick Craig-Wood
5b6bcfc184 Add HNGamingUK to contributors 2021-09-28 11:51:41 +01:00
HNGamingUK
1409b89f6c swift: document OVH Cloud Archive - fixes #3041
Added documentation for OVH Cloud Archive, providing information on how to restore/unfreeze/download objects.
2021-09-20 17:32:13 +01:00
Fred
00c6642fad seafile: fix 2fa state machine 2021-09-18 12:44:59 +01:00
Nick Craig-Wood
badefdb060 pcloud: try harder to delete a failed upload
This fixes the integration tests when testing errored uploads
2021-09-17 10:43:45 +01:00
Nick Craig-Wood
9c2533821d pcloud: return an early error when Put is called with an unknown size
This stops the 10 minute pause in the integration tests
2021-09-17 10:43:45 +01:00
Nick Craig-Wood
c718fe4330 pcloud: fix sha256 hashes #5496
This was started in

3626f10f26 pcloud: add sha256 support - fixes #5496

But this support turned out to be incomplete and caused the
integration tests to fail.
2021-09-17 10:43:45 +01:00
Nick Craig-Wood
3298493b0b Add wzl to contributors 2021-09-17 10:43:45 +01:00
Abhinav Sharma
18f3929186 docs: update ignored email as per #5586 2021-09-12 18:46:37 +01:00
wzl
b35db61a80 docs: add a step for drive.md 2021-09-11 23:00:59 +03:00
Abhinav Sharma
3c17762c4e update the email 2021-09-11 13:08:29 +03:00
Ivan Andreev
24de896df2 build: apply gofmt from golang 1.17 2021-09-09 20:43:59 +03:00
Ivan Andreev
2bc2546d5c test: skip mount2 test on single-CPU runners 2021-09-06 15:01:44 +03:00
Tatsuya Noyori
05f128868f azureblob: add --azureblob-no-head-object 2021-09-06 10:41:54 +01:00
x0b
f7f4468cbc build: update Go to 1.16 and NDK to 22b for android/any 2021-09-03 13:32:48 +03:00
Ivan Andreev
aa0ceb6c5c cmd/version: add support for openbsd/386
After this patch the version command will be fully
supported on openbsd/amd64 and openbsd/386.
Remaining os/arch combinations stay as is.
2021-09-02 11:13:12 +03:00
albertony
f1f923a986 Change byte unit format from MiByte to MiB 2021-08-31 09:57:27 +02:00
albertony
8500d95579 test: consider global option for printing human-readable sizes and avoid unsigned integer overflow 2021-08-31 09:57:27 +02:00
albertony
8c4b06da79 tree: option to print human-readable sizes removed in favor of global option 2021-08-31 09:57:27 +02:00
albertony
6d25ba7c02 about: make human-readable output more consistent with other commands 2021-08-31 09:57:27 +02:00
albertony
774efeabf0 ncdu: introduce key u to toggle human-readable 2021-08-31 09:57:27 +02:00
albertony
d24f87c6a9 size: include human-readable count 2021-08-31 09:57:27 +02:00
albertony
721a9786a7 ls: introduce a global option to print human-readable sizes and consider it for ls commands
Fixes #1890
2021-08-31 09:57:27 +02:00
albertony
94521959f8 docs/config: remove use of backticks around words within a larger code block 2021-08-31 09:02:31 +02:00
Nick Craig-Wood
6a9ef27b09 cache: don't run failing tests on windows/386
After updating rclone's dependencies these tests started failing on
windows/386

- TestInternalDoubleWrittenContentMatches
- TestInternalMaxChunkSizeRespected

The failures look like this. The root cause is unknown. The `Wait(n=1)
would exceed context deadline` errors come from golang.org/x/time/rate
but it isn't clear what is calling them.

2021/08/20 21:57:16 ERROR : worker-0 <one>: object open failed 0: rate: Wait(n=1) would exceed context deadline
[snip ~10 duplicates]
2021/08/20 21:57:56 ERROR : tidwcm1629496636/one: (0/26) error (chunk not found 0) response
2021/08/20 21:58:02 ERROR : worker-0 <one>: object open failed 0: rate: Wait(n=1) would exceed context deadline
--- FAIL: TestInternalDoubleWrittenContentMatches (45.77s)
    cache_internal_test.go:310:
        	Error Trace:	cache_internal_test.go:310
        	Error:      	Not equal:
        	            	expected: "one content updated double"
        	            	actual  : ""

        	            	Diff:
        	            	--- Expected
        	            	+++ Actual
        	            	@@ -1 +1 @@
        	            	-one content updated double
        	            	+
        	Test:       	TestInternalDoubleWrittenContentMatches
2021/08/20 21:58:03 original size: 23592960
2021/08/20 21:58:03 updated size: 12
2021-08-20 23:28:18 +01:00
Nick Craig-Wood
09fd258b5c build: update all dependencies 2021-08-20 22:03:38 +01:00
Nick Craig-Wood
2cefae51a1 build: make go1.14 the minimum supported Go for building 2021-08-20 19:04:29 +01:00
Nick Craig-Wood
e4fb5e99ef build: use go1.17 for building 2021-08-20 19:04:29 +01:00
Nick Craig-Wood
8bd26c663a build: update golang.org/x/sys for go1.17 build 2021-08-20 19:04:29 +01:00
Nick Craig-Wood
dd97fbc55f Add Parth Shukla to contributors 2021-08-20 19:04:23 +01:00
Nick Craig-Wood
b32d00ba37 Add Justin Hellings to contributors 2021-08-20 19:04:23 +01:00
albertony
3a2f748aeb vfs: ensure names used in cache path are legal on current os
Fixes #5360
2021-08-19 20:14:50 +02:00
albertony
18be4ad10d vfs: fix issue where empty dirs would build up in cache meta dir 2021-08-19 20:14:50 +02:00
albertony
9a2811f0b2 local: refactor default os encoding out from local backend into shared encoder lib 2021-08-19 20:14:50 +02:00
albertony
63708d73be docs/vfs: Merge duplicate chunked reading documentation from mount docs 2021-08-19 19:29:41 +02:00
Parth Shukla
60323dc5e2 googlephotos: Use encoder for album names 2021-08-19 16:38:31 +01:00
Justin Hellings
359648e002 docs: Removed ambiguity from copy command docs
Switched from talking about "unchanged" files to "identical" files.

I found out the hard way that the rclone copy will overwrite newer files.
Looking at posts in the rclone forum, this is a common experience.

The docs for copy have referred to "unchanged" files.
This is ambiguous because it intuitively introduces a sense
of chronology, but chronology is irrelevant.
Rclone only "cares" about difference, not change.
2021-08-19 16:34:57 +01:00
Ivan Andreev
e45c23ab79 cmd/version: add support for openbsd/amd64
After this patch the version command will be
- fully supported on openbsd/amd64
- stay stub on openbsd/i386 until we deprecate go 1.17
Remaining os/arch combinations stay as is.
2021-08-16 11:39:34 +03:00
Nick Craig-Wood
890b6a45b5 sugarsync: fix initial connection after config re-arrangement - Fixes #5525
In this commit the config system was re-arranged

    94dbfa4ea fs: change Config callback into state based callback #3455

This passed the password as a temporary config parameter but forgot to
reveal it in the API call.
2021-08-14 12:53:36 +01:00
Nick Craig-Wood
227119da16 Add Ken Enrique Morel to contributors 2021-08-14 12:53:36 +01:00
Ken Enrique Morel
3626f10f26 pcloud: add sha256 support - fixes #5496 2021-08-14 12:48:25 +01:00
negative0
82ad9a30b9 rc: fix speed does not update in core/stats 2021-08-14 12:45:51 +01:00
Ivan Andreev
448a03181f cmd/mount: --fast-list does nothing on a mount 2021-08-13 21:11:56 +03:00
Ivan Andreev
3615619645 serve/docker: retry saveState to fix sporadic test failure on macOS/Windows 2021-08-13 21:00:21 +03:00
Nick Craig-Wood
33ddd540b6 accounting: fix maximum bwlimit by scaling max token bucket size
Before this fix, on Windows, the --bwlimit would max out at 2.5Gbps
even when set to 10 Gbps.

This turned out to be because of the maximum token bucket size.

This fix scales up the token bucket size linearly above a bwlimit of
2Gbps.

Fixes #5507
2021-08-13 16:55:24 +01:00
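
A sketch of the idea using golang.org/x/time/rate; the base burst size and the 2 Gbit/s threshold below are illustrative values, not the exact constants used by rclone:

```go
package bwlimit

import "golang.org/x/time/rate"

// Illustrative values: a base bucket size, and the rate (in bytes/s,
// roughly 2 Gbit/s) above which the bucket is scaled up linearly.
const (
	defaultMaxBurst  = 4 * 1024 * 1024
	burstScaleCutoff = 250 * 1024 * 1024
)

// newLimiter builds a token bucket whose burst grows linearly with the
// configured bandwidth once it exceeds the cutoff, so coarse timer
// granularity cannot cap the achievable throughput.
func newLimiter(bytesPerSecond int64) *rate.Limiter {
	burst := defaultMaxBurst
	if bytesPerSecond > burstScaleCutoff {
		burst = int(int64(defaultMaxBurst) * bytesPerSecond / burstScaleCutoff)
	}
	return rate.NewLimiter(rate.Limit(bytesPerSecond), burst)
}
```
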
Nick Craig-Wood
a5f277f47e vfs: fix crash when truncating a just uploaded object - Fixes #5522 2021-08-11 11:55:09 +01:00
Nick Craig-Wood
bec253fd39 Add vinibali to contributors 2021-08-11 11:55:09 +01:00
Nick Craig-Wood
815a6ac8aa Add hota to contributors 2021-08-11 11:55:09 +01:00
Ivan Andreev
8106f65e0b Add yedamo to contributors 2021-08-11 11:07:13 +03:00
yedamo
96f77ebe5a selfupdate: fix --quiet option, not quite quiet
Fixes #5505
2021-08-11 10:14:19 +03:00
Greg Sadetsky
36f0231082 docs/drive: Fix lsf example without drive-impersonate (#5504) 2021-08-10 21:59:36 +02:00
albertony
168cb65c61 Add Greg Sadetsky to contributors 2021-08-10 21:50:26 +02:00
Greg Sadetsky
e00db968aa docs/s3: fix typo in s3 documentation (#5515) 2021-08-10 21:45:49 +02:00
partev
bb6b44d199 DOC: "OS X" -> "macOS" 2021-08-10 10:12:30 +03:00
vinibali
88b35bc32d Update yandex.md
add mail subscription exception
2021-08-09 23:28:41 +03:00
Nathan Collins
c32d5dd1f3 fs: move with --ignore-existing will not delete skipped files - #5463 2021-08-01 17:46:45 +01:00
Greg Sadetsky
3d9da896d2 drive: fix instructions for auto config #5499 2021-08-01 15:17:07 +01:00
hota
839c20bb35 s3: add Wasabi's AP-Northeast endpoint info
* Wasabi has started to provide an AP Northeast (Tokyo) endpoint for all customers, so add it to the list

Signed-off-by: lindwurm <lindwurm.q@gmail.com>
2021-08-01 14:56:52 +01:00
Nick Craig-Wood
7c58148840 Start v1.57.0-DEV development 2021-08-01 13:43:36 +01:00
Nick Craig-Wood
6545755758 sftp: remove spurious error message on --sftp-disable-concurrent-reads 2021-07-31 11:04:45 +01:00
Nick Craig-Wood
c86a55c798 vfs: fix duplicates on rename - fixes #5469
Before this change, if there was an existing file being uploaded when
a file was renamed on top of it, then both would be uploaded. This
causes a duplicate in Google Drive as both files get uploaded at the
same time. This was triggered reliably by LibreOffice saving doc
files.

This fix removes any duplicates in the upload queue on rename.
2021-07-30 19:31:02 +01:00
Nick Craig-Wood
1d280081d4 Add Mariano Absatz (git) to contributors 2021-07-30 19:31:02 +01:00
Nick Craig-Wood
f48cb5985f Add Justin Winokur (Jwink3101) to contributors 2021-07-30 19:31:02 +01:00
Ivan Andreev
55e766f4e8 mountlib: restore daemon mode after #5415 2021-07-29 13:35:04 +03:00
Alex Chen
63a24255f8 onedrive: handle HTTP 400 better in PublicLink() (#5419) 2021-07-27 17:55:57 +08:00
Cnly
bc74f0621e http: fix serve http exits directly after starting 2021-07-25 14:06:43 +01:00
Mariano Absatz (git)
f39a08c9d7 clarification of the process for creating custom client_id 2021-07-24 09:19:48 +03:00
Justin Winokur (Jwink3101)
675548070d fs/operations: add rmdirs -v output - fixes #5464 2021-07-24 09:16:23 +03:00
Nick Craig-Wood
37ff05a5fa Version v1.56.0 2021-07-20 19:45:41 +01:00
Nick Craig-Wood
c67c1ab4ee test makefiles: fix documentation so it doesn't have HTML in 2021-07-20 19:37:09 +01:00
Nick Craig-Wood
76f8095bc5 hdfs: fix documentation so it doesn't have HTML in 2021-07-20 19:36:30 +01:00
Nick Craig-Wood
f646cd0a2a librclone: add missing sync/* rc methods
See: https://forum.rclone.org/t/missing-directory-copy-move-methods-in-librclone/24503
2021-07-20 16:59:02 +01:00
Nick Craig-Wood
d38f6bb0ab gphotos: fix read only scope not being used properly
Before this change the read only scope was being ignored and rclone
was asking for a read-write scope.

https://forum.rclone.org/t/google-photos-copy-sync-errors/25153
2021-07-20 16:57:55 +01:00
Nick Craig-Wood
11d86c74b2 docs: expand contents and make docs full screen 2021-07-20 16:53:21 +01:00
Nick Craig-Wood
feb6046a8a docs: add table of contents to every page 2021-07-20 16:53:21 +01:00
Nick Craig-Wood
807102ada2 drive: fix config system overwriting team drive ID - fixes #5454 2021-07-20 16:51:59 +01:00
Nick Craig-Wood
770b3496a1 config: fix in memory config not saving on the fly backend config
Before this fix, saving a :backend config gave the error

    Can't save config "token" = "XXX" for on the fly backend ":backend"

Even when using the in-memory config `--config ""`

This fixes the problem by
- always using the in memory config if it is configured
- moving the check for a :backend config save to the file config backend

It also removes the contents of the config items being saved from the
log which saves confidential tokens being logged.

Fixes #5451
2021-07-20 12:09:38 +01:00
buengese
da36ce08e4 docs/jottacloud: add short note on how no versions option works 2021-07-15 17:29:30 +02:00
buengese
8652cfe575 jottacloud: add no versions option 2021-07-15 17:29:30 +02:00
Nick Craig-Wood
94b1439299 drive: fix some google docs being treated as files - fixes #5455
At some point some google docs files started having sizes returned in
their listing information.

This then caused rclone to treat the docs as files which caused
downloads to fail.

The API docs now state that google docs may have sizes (whereas I'm
pretty sure it didn't earlier).

This fix removes the check for size, so google docs are identified
solely by not having an MD5 checksum.
2021-07-14 11:40:58 +01:00
Nick Craig-Wood
97c9e55ddb Add Antoine GIRARD to contributors 2021-07-14 11:40:57 +01:00
Ivan Andreev
c0b2832509 docs: serve docker: fix URL of systemd contrib files (#5415) 2021-07-11 13:23:00 +03:00
Ivan Andreev
7436768d62 docs for serve docker and docker plugin (#5415) 2021-07-10 23:56:09 +03:00
Ivan Andreev
55153403aa build docker plugin (#5415) 2021-07-10 23:56:09 +03:00
Antoine GIRARD
daf449b5f2 cmd/serve: add serve docker command (#5415)
Fixes #4750

Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
2021-07-10 23:56:09 +03:00
Antoine GIRARD
221dfc3882 mountlib: refactor before adding serve docker (#5415)
Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
2021-07-10 23:56:09 +03:00
Nick Craig-Wood
aab29353d1 Update email address for Serge Pouliquen 2021-07-08 12:49:13 +01:00
Nick Craig-Wood
c24504b793 Add Chuan Zh to contributors 2021-07-08 12:47:35 +01:00
Nick Craig-Wood
6338d0026e Add Michael Hanselmann to contributors 2021-07-08 12:47:35 +01:00
Chuan Zh
ba836d45ff s3: update Alibaba OSS endpoints 2021-07-08 12:03:04 +01:00
Ole Frost
367cf984af docs: added tip to reduce SharePoint throttling - fixes #5404 2021-07-08 11:39:52 +01:00
Michael Hanselmann
6b7d7d0441 atexit: Terminate with non-zero status after receiving signal
When rclone received a SIGINT (Ctrl+C) or SIGTERM signal while an atexit
function is registered it always terminated with status code 0. Unix
convention is to exit with a non-zero status code. Often it's
`128 + int(signum)`, but at least not zero.

With this change fatal signals handled by the `atexit` package cause
a non-zero exit code. On Unix systems it's `128 + int(signum)` while
on other systems, such as Windows, it's always 2 ("error not otherwise
categorised").

Resolves #5437.

Signed-off-by: Michael Hanselmann <public@hansmi.ch>
2021-07-07 17:59:26 +01:00
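
A minimal standalone sketch of the convention (not the atexit package itself):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)

	sig := <-ch
	// ... run registered atexit functions here ...
	fmt.Fprintf(os.Stderr, "received %v, exiting\n", sig)
	code := 2 // "error not otherwise categorised" fallback (e.g. Windows)
	if s, ok := sig.(syscall.Signal); ok {
		code = 128 + int(s) // Unix convention: 128 + signal number
	}
	os.Exit(code)
}
```
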
Michael Hanselmann
cf19073ac9 cmd: Move exit status codes to separate package
Signal handling by the `atexit` package needs access to
`exitCodeUncategorizedError`. With this change all exit status values
are moved to a dedicated package so that they can be reused.

Signed-off-by: Michael Hanselmann <public@hansmi.ch>
2021-07-07 17:59:26 +01:00
Nick Craig-Wood
ba5c559fec fs/sync: fix tests by only --compare-dest timestamp if have hash
This fixes the integration test errors introduced in #5410
2021-07-07 16:59:51 +01:00
Nick Craig-Wood
abb8fe8ba1 Add Haochen Tong to contributors 2021-07-07 16:59:51 +01:00
Nick Craig-Wood
765af387e6 Add Dmitry Sitnikov to contributors 2021-07-07 16:59:51 +01:00
Nick Craig-Wood
d05cf6aba8 Add partev to contributors 2021-07-07 16:59:51 +01:00
Nick Craig-Wood
76a3fef24d Add Xuanchen Wu to contributors 2021-07-07 16:59:51 +01:00
Ivan Andreev
b40d9bd4c4 cmd: add hashSUM file support (#5352)
Currently rclone check supports matching two file trees by sizes and hashes.
This change adds support for SUM files produced by GNU utilities like sha1sum.

Fixes #1005 

Note: checksum by default checks, hashsum by default prints sums.
The new flag is named "--checkfile" but carries the hash name.
Summary of introduced command forms:

```
rclone check sums.sha1 remote:path --checkfile sha1
rclone checksum sha1 sums.sha1 remote:path             
rclone hashsum sha1 remote:path --checkfile sums.sha1
rclone sha1sum remote:path --checkfile sums.sha1
rclone md5sum remote:path --checkfile sums.md5
```
2021-07-07 18:34:16 +03:00
Ivan Andreev
4680c0776d backend/local: skip entries removed concurrently with List() (#5297)
This change fixes the bug described below:
if a file is removed while the local backend List() runs,
the call will flag an accounting error.
The bug manifests itself if local backend is the Sync target
due to intrinsic concurrency.
The odds to hit this bug depend on --checkers and --transfers.
Chunker over local backend is affected even more because
updating a composite object with a smaller size content
translates into removing chunks on the underlying file system
and involves a number of List() calls.
2021-07-07 16:50:19 +03:00
buengese
fb305b5976 fichier: check that we actually got a download token and retry if we didn't 2021-07-06 14:58:50 +02:00
Ole Frost
5e91b93e59 cmdtest: end-to-end test for commands, flags and environment variables
There was no easy way to automatically test the end-to-end functionality
of commands, flags, environment variables etc.

The need for end-to-end testing was highlighted by the issues fixed
in #5341. There was no automated test to continually verify current
behaviour, nor a framework to quickly test the correctness of the fixes.

This change adds an end-to-end testing framework in the cmdtest folder.
It has some simple examples in func TestCmdTest in cmdtest_test.go. The
tests should be readable by anybody familiar with rclone and look like
this:

    // Test the rclone version command with debug logging (-vv)
    out, err = rclone("version", "-vv")
    if assert.NoError(t, err) {
        assert.Contains(t, out, "rclone v")
        assert.Contains(t, out, "os/version:")
        assert.Contains(t, out, " DEBUG : ")
    }

The end-to-end tests are executed just like the Go unit tests, that is:

    go test ./cmdtest -v

The change also contains a thorough test of environment variables in
environment_test.go.

Thanks to @ncw for encouragement and introduction to the TestMain trick.
2021-07-05 16:38:20 +01:00
Ole Frost
58c99427b3 config: fixed issues with flags/options set by environment vars.
Some environment variables didn’t behave like their corresponding
command line flags. The affected flags were --stats, --log-level,
--separator, --multi-thread-streams, --rc-addr, --rc-user and --rc-pass.
Example:

    RCLONE_STATS='10s'
    rclone check remote: remote: --progress
    # Expected: rclone check remote: remote: --progress --stats=10s
    # Actual: rclone check remote: remote: --progress

Remote specific options set by environment variables were overruled by
less specific backend options set by environment variables. Example:

    RCLONE_DRIVE_USE_TRASH='false'
    RCLONE_CONFIG_MYDRIVE_USE_TRASH='true'
    rclone deletefile myDrive:my-test-file
    # Expected: my-test-file is recoverable in the trash folder
    # Actual: my-test-file is permanently deleted (not recoverable)

Backend specific options set by environment variables were overruled by
general backend options set by environment variables. Example:

    RCLONE_SKIP_LINKS='true'
    RCLONE_LOCAL_SKIP_LINKS='false'
    rclone lsd local:
    # Expected result: Warnings when symlinks are skipped
    # Actual result: No warnings when symlinks are skipped
    # That is RCLONE_SKIP_LINKS takes precedence

The above issues have been fixed.

The debug logging (-vv) has been enhanced to show when flags are set by
environment variables.

The documentation has been enhanced with details on the precedence of
configuration options.

See pull request #5341 for more information.
2021-07-05 16:38:20 +01:00
albertony
fee0abf513 docs: add note about use of user and logname environment variables for current username 2021-07-05 16:31:16 +01:00
Nick Gaya
40024990b7 fs/operations: Don't update timestamps of files in --compare-dest 2021-07-05 16:29:44 +01:00
Haochen Tong
04aa6969a4 accounting: calculate rolling average speed 2021-07-05 16:27:33 +01:00
Haochen Tong
d2050523de accounting: fix startTime of statsGroups.sum 2021-07-05 16:27:33 +01:00
Ivan Andreev
1cc6dd349e Add google search widget to rclone.org 2021-07-05 16:21:36 +01:00
Ole Frost
721bae11c3 docs: ease contribution for beginners in Go, Git and GitHub
Improved/added steps to:
 * Install Git with basic setup
 * Use both SSH and HTTPS for the git origin
 * Install Go and verify the GOPATH
 * Update the forked master
 * Find a popular editor for Go
2021-07-05 16:03:53 +01:00
Dmitry Sitnikov
b439199578 azureblob: Fix typo in Azure Blob help
Change the command to create RBAC file to the correct one
`az ad sp create-for-rbac`
Add the link to the command documentation
https://docs.microsoft.com/en-us/cli/azure/ad/sp?view=azure-cli-latest#az_ad_sp_create_for_rbac
2021-07-05 15:58:41 +01:00
partev
0bfd6f793b docs: replace OSX with macOS 2021-07-05 14:51:00 +01:00
Nick Craig-Wood
76ea716abf ftp: make upload error 250 indicate success
Some servers seem to send return code 250 to indicate successful
upload - previously rclone was treating this as an error.

See: https://forum.rclone.org/t/transfer-on-mega-in-ftp-mode-is-not-working/24642/
2021-07-05 10:35:02 +01:00
Alex Chen
e635f4c0be fs: make --dump imply -vv (#5418) 2021-06-23 00:32:26 +08:00
Xuanchen Wu
0cb973f127 onedrive: Make link return direct download link (#5417)
Co-authored-by: Cnly <minecnly@gmail.com>
2021-06-22 21:25:08 +08:00
Alex Chen
96ace599a8 fs: fix logging level mentioned in docs of Logf 2021-06-21 23:30:26 +08:00
Ivan Andreev
80bccacd83 fs: split overgrown fs.go (#5405)
Nothing is added or removed and no package is renamed by this change.
Just rearrange definitions between source files in the fs directory.

New source files:
- types.go      Filesystem types and interfaces
- features.go   Features and optional interfaces
- registry.go   Filesystem registry and backend options
- newfs.go      NewFs and its helpers
- configmap.go  Getters and Setters for ConfigMap
- pacer.go      Pacer with logging and calculator
The final fs.go contains what is left.

Also rename options.go to open_options.go
to dissociate from registry options.
2021-06-14 14:42:49 +03:00
Nick Craig-Wood
3349b055f5 fichier: fix move of files in the same directory
See: https://forum.rclone.org/t/1fichier-rclone-does-not-allow-to-rename-files-and-folders-when-you-mount-a-1fichier-disk-drive/24726/24
2021-06-11 14:21:23 +01:00
Nick Craig-Wood
bef0c23e00 fichier: make error messages report text from the API
See: https://forum.rclone.org/t/1fichier-rclone-does-not-allow-to-rename-files-and-folders-when-you-mount-a-1fichier-disk-drive/24726/24
2021-06-11 14:21:23 +01:00
Nick Craig-Wood
84201ed891 zoho: improve wording for region - fixes #5377 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
04608428bf Add Florian Penzkofer to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
6aaa06d7be Add darrenrhs to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
e53bad5353 Add Reid Buzby to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
f5397246eb Add Chris Lu to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
b8b73f2656 Add database64128 to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
96b67ce0ec Add Tyson Moore to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
e2beeffd76 Add Tom to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
30b949642d Add acsfer to contributors 2021-06-11 14:21:23 +01:00
Florian Penzkofer
92b3518c78 fichier: support downloading password protected files and folders 2021-06-10 19:00:26 +02:00
Ivan Andreev
062919e08c deprecate cache backend (#5382) 2021-06-10 19:52:55 +03:00
darrenrhs
654f5309b0 docs: drive: include requirement to publish app in step-by-step - fixes #5393 2021-06-10 17:00:52 +01:00
albertony
318fa4472b docs: fix incorrect syntax in config update example 2021-06-10 08:59:18 +02:00
Reid Buzby
5104e24153 docs: fix incorrect token type for yandex
https://forum.rclone.org/t/yandex-documentation/24445/2
2021-06-09 13:04:55 +02:00
albertony
9d87a5192d docs: fix code section formatting in filtering docs
Fixes #5387
2021-06-08 18:53:18 +02:00
Ivan Andreev
29f967dba3 make commanddocs for v1.56 (#5383) 2021-06-08 18:57:04 +03:00
Chris Lu
1f846c18d4 s3: Add SeaweedFS 2021-06-08 09:59:57 +01:00
albertony
41f561bf26 jottacloud: fix invalid url in output from link command
Fixes #5370
2021-05-31 10:40:21 +02:00
database64128
df60e6323c 🧹 GCS: Clean up time format constants 2021-05-28 14:44:50 +01:00
database64128
58006a925a 📑 GCS: Update docs on mtime
- Mention the new modification time behavior and the modify window issue.
- Unify markdown format.
- ref rclone/rclone#5331
2021-05-28 14:44:50 +01:00
database64128
ee2fac1855 🕰️ GCS: Compatible with gsutil's mtime metadata
- Write `goog-reserved-file-mtime` in addition to `mtime`.
- Fallback to `goog-reserved-file-mtime` if `mtime` doesn't exist.
- ref rclone/rclone#5331
2021-05-28 14:44:50 +01:00
Tyson Moore
2188fe38e5 docs: add caveat about DSCP on Windows 2021-05-28 13:43:38 +01:00
Tyson Moore
b5f8f0973b fshttp: implement graceful DSCP error handling 2021-05-28 13:43:38 +01:00
Tyson Moore
85b8ba9469 fshttp: rework address parsing for DSCP (fixes #5293) 2021-05-28 13:43:38 +01:00
Tom
04a1f673f0 serve sftp: add --stdio flag to serve via stdio - fixes #5311 2021-05-28 13:40:32 +01:00
albertony
0574ebf44a vfs: do not print notice about missing poll-interval support when set to 0
Fixes #5359
2021-05-28 13:09:15 +02:00
albertony
22e86ce335 vfs: fix that umask option cannot be set as environment variable (#5351)
Fixes #5350
2021-05-22 20:48:02 +02:00
acsfer
c9fce20249 tardigrade: add warning about too many open files - Fixes #5310 2021-05-21 20:04:57 +01:00
Ivan Andreev
5b6f637461 fs/hash: align hashsum names and update documentation (#5339)
- Unify all hash names as lowercase alphanumerics without punctuation.
- Legacy names continue to work but disappear from docs; they can be deprecated or dropped later.
- Make rclone hashsum print supported hash list in case of wrong spelling.
- Update documentation.

Fixes #5071
Fixes #4841
2021-05-21 17:32:33 +03:00
albertony
07f2f3a62e docs: fix link to paths on windows section 2021-05-19 22:11:17 +02:00
albertony
6dc190ec93 docs: mention that network/unc paths are supported in local filesystem on windows 2021-05-19 22:11:17 +02:00
Nick Craig-Wood
71f75a1d95 operations: fix tests work on compress by supplying incompressible data 2021-05-18 17:38:32 +01:00
Nick Craig-Wood
1b44035e45 filefabric: fix listing after change of from field from "int" to int. 2021-05-18 17:11:16 +01:00
Nick Craig-Wood
054b467f32 check: log the hash in use like cryptcheck does
See: https://forum.rclone.org/t/does-a-rclone-check-on-similar-remotes-still-compute-hashes/24288/15
2021-05-18 16:21:19 +01:00
Ivan Andreev
23da913d03 dbhashsum: drop command deprecated a year ago - #4837 (#5336)
dbhashsum was deprecated in rclone 1.52 on 2020-05-27
this patch drops the command completely since rclone 1.56
2021-05-18 12:27:17 +03:00
Nick Craig-Wood
c0cda087a8 s3: don't check to see if remote is object if it ends with /
Before this change, rclone would always check the root to see if it
was an object.

This change doesn't check to see if the root is an object if the path
ends with a /

This avoids a transaction where rclone HEADs the path to see if it
exists.

See #4990
2021-05-17 16:43:34 +01:00
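
A sketch of the short-circuit; headObject stands in for the real HEAD request and is hypothetical:

```go
package s3root

import (
	"context"
	"strings"
)

// headObject is a hypothetical stand-in for the HEAD request used to
// check whether a key refers to a single object.
func headObject(ctx context.Context, key string) (bool, error) { return false, nil }

// rootIsObject reports whether the configured root points at an object.
// A root ending in "/" can only be a prefix (directory), so in that case
// the HEAD transaction is skipped entirely.
func rootIsObject(ctx context.Context, root string) (bool, error) {
	if root == "" || strings.HasSuffix(root, "/") {
		return false, nil
	}
	return headObject(ctx, root)
}
```
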
Nick Craig-Wood
1773717a47 fs/march: improve errors when root source/destination doesn't exist
See: https://forum.rclone.org/t/rclone-attempts-to-read-files-in-the-destination-directory-when-the-source-doesnt-exist/23412
2021-05-17 16:38:03 +01:00
Nick Craig-Wood
04308dcaa1 local: add --local-unicode-normalization (and remove --local-no-unicode-normalization)
macOS stores files in NFD form and transferring them like this to some
systems causes the Korean language to display incorrectly.

This adds the flag --local-unicode-normalization to optionally
normalize the file names to NFC.

This also removes the (long deprecated) --local-no-unicode-normalization flag

See: https://forum.rclone.org/t/support-for-korean-jaso-conversion/19435
2021-05-17 16:34:25 +01:00
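
The normalization itself is a one-liner with golang.org/x/text/unicode/norm; a sketch, not the actual local backend code:

```go
package localname

import "golang.org/x/text/unicode/norm"

// normalizeName converts a file name to NFC when the option is enabled.
// macOS stores names in decomposed (NFD) form; converting to composed
// (NFC) form keeps e.g. Korean names displaying correctly elsewhere.
func normalizeName(name string, unicodeNormalization bool) string {
	if !unicodeNormalization {
		return name
	}
	return norm.NFC.String(name)
}
```
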
Nick Craig-Wood
06f27384dd b2: fix versions and .files with no extension - fixes #5244 2021-05-17 16:20:29 +01:00
Nick Craig-Wood
82f1f7d2c4 config: expand docs on config protocol #3455 2021-05-17 12:10:58 +01:00
Nick Craig-Wood
6555d3eb33 onedrive: fix failed to configure: empty token found error #3455
This bug was caused as part of the config rework
2021-05-17 12:10:58 +01:00
Nick Craig-Wood
03229cf394 bin/config.py: add --rc flag for testing to an rclone rcd #3455 2021-05-17 12:10:58 +01:00
Nick Craig-Wood
f572bf7829 Add sp31415t1 to contributors 2021-05-17 12:10:58 +01:00
sp31415t1
f593558dc2 docs: improve --disable help 2021-05-14 15:44:58 +01:00
Ivan Andreev
08040a57b0 dropbox: improve "own App IP" instructions (#5325)
Instructions in https://rclone.org/dropbox/#get-your-own-dropbox-app-id
are a little incomplete. I had to guess a few extra details to make things work.
This patch adds missing parts.

Fixes #5242
2021-05-14 17:42:30 +03:00
Alexey Ivanov
2fa7a3c0fb dropbox: simplify chunked uploads
Signed-off-by: Alexey Ivanov <rbtz@dropbox.com>
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
798d1293df Add Alexey Ivanov to contributors 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
75c417ad93 dropbox: fix async batch missing the last few entries 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
5ee646f264 dropbox: make batcher retry all errors so it doesn't exit early
See: https://forum.rclone.org/t/dropbox-too-many-requests-or-write-operations-trying-again-in-15-seconds/23316/18
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
4a4aca4da7 dropbox: fix deadlock in batch Commit 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
2e4b65f888 dropbox: add --dropbox-batch-mode flag to speed up uploading #5156
This adds 3 upload modes for dropbox off, sync and async and makes
sync the default.

This should improve uploads (especially for small files) greatly.
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
77cda6773c config: tidy code to use UpdateRemote/CreateRemote instead of editOptions #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
dbc5167281 bin: add config.py as an example of how to use the state based config #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
635d1e10ae config create: add --state and --result parameters #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
296ceadda6 fs: add --all to rclone config create/update to ask all the config questions #3455
This also factors the config questions into a state based mechanism so
a backend can be configured using the same dialog as rclone config but
remotely.
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
7ae2891252 fs: Add Exclusive parameter to Option to choose Examples only #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
99caf79ffe config: allow config create and friends to take key=value parameters #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
095cf9e4be config create: add --non-interactive and --continue parameters #3455
This adds a mechanism to add external interfaces to rclone using the
state based configuration.
2021-05-14 14:07:44 +01:00
buengese
e57553930f jottacloud: fix legacy auth with state based config system
...also some minor cleanup
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
f122808d86 fs: add names to each config parameter so we can override them #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
94dbfa4ea6 fs: change Config callback into state based callback #3455
This is a very large change which turns the post Config function in
backends into a state based call and response system so that
alternative user interfaces can be added.

The existing config logic has been converted, but it is quite
complicated and followup commits will likely be needed to fix it!

Follow up commits will add a command line and API based way of using
this configuration system.
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
6f2e525821 Add Antoon Prins to contributors 2021-05-14 14:07:44 +01:00
Ivan Andreev
119bddc10b selfupdate: fix archive name on macos 2021-05-13 22:35:39 +03:00
albertony
28e9fd45cc vfs: avoid unnecessary subdir in cache path
Fixes #5316
2021-05-13 11:16:42 +02:00
Antoon Prins
326f3b35ff webdav: add headers option 2021-05-12 09:52:07 +01:00
albertony
ce83228cb2 sftp: expand tilde and environment variables in configured known_hosts_file (#5322)
Fixes #5220
2021-05-11 19:58:26 +02:00
Chris Macklin
732bc08ced config: replace defaultConfig with a thread-safe in-memory implementation 2021-05-07 16:04:09 +01:00
Nick Craig-Wood
6ef7178ee4 local: always use readlink to read symlink size
It was discovered that on some Android systems, the stat size of a symlink
is different to the size that readlink returns.

This was giving errors like this

    transport connection broken: http: ContentLength=30 with Body length 28

There are enough exceptions to the size of readlink being different to
the size of stat that this patch now always does readlink to work out
the size of a symlink.

Since symlinks are relatively uncommon this shouldn't affect
performance too much and will mean that the size is always correct.

This deprecates the --local-zero-size-links flag which is now
effectively always enabled.

See: https://forum.rclone.org/t/problem-with-symlinks-and-links/23840/
2021-05-04 08:53:09 +01:00
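
A sketch of sizing a symlink from its target rather than from lstat (not the actual local backend code):

```go
package localsize

import "os"

// symlinkSize returns the length of a symlink's target as reported by
// readlink, rather than trusting lstat's size field, since the two have
// been observed to disagree (e.g. on some Android systems).
func symlinkSize(path string) (int64, error) {
	target, err := os.Readlink(path)
	if err != nil {
		return 0, err
	}
	return int64(len(target)), nil
}
```
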
Nick Craig-Wood
9ff6f48d74 Remove accidentally committed *.orig and *.rej files and ignore 2021-05-03 07:58:29 +01:00
Nick Craig-Wood
532af77fd1 Add Chris Macklin to contributors 2021-05-03 07:58:29 +01:00
Nolan Woods
ab7dfe0c87 http: clean up Bind to better use middleware 2021-05-02 11:31:01 +01:00
Nolan Woods
e489a101f6 lib/http: add default 404 handler 2021-05-02 11:30:02 +01:00
Chris Macklin
35a86193b7 accounting: deglobalize startTime/elapsedTime - fixes #5282 2021-05-01 14:51:21 +01:00
x0b
2833941da8 build: add gomobile android build 2021-04-30 20:39:04 +01:00
Nick Craig-Wood
9e6c23d9af fs: add --disable-http2 for global http2 disable #5253 2021-04-30 20:31:04 +01:00
Nick Craig-Wood
8bef972262 Add Gautam Kumar to contributors 2021-04-30 20:31:04 +01:00
Nick Craig-Wood
0a968818f6 Add Nolan Woods to contributors 2021-04-30 20:31:04 +01:00
Nick Craig-Wood
c2ac353183 Add lewisxy to contributors 2021-04-30 20:31:04 +01:00
Nick Craig-Wood
773da395fb Add Tatsuya Noyori to contributors 2021-04-30 20:31:04 +01:00
Gautam Kumar
9e8cd6bff9 docs: fixed some typos 2021-04-28 22:55:27 +01:00
Nolan Woods
5d2e327b6f http: Replace httplib with lib/http 2021-04-28 22:54:15 +01:00
Nolan Woods
77221d7528 httplib: Deprecate package 2021-04-28 22:54:15 +01:00
Nolan Woods
1971c1ef87 httplib: Move httplib/serve/data to ../serve/http/data 2021-04-28 22:54:15 +01:00
Nolan Woods
7e7dbe16c2 httplib: Add --template config and flags to serve/data 2021-04-28 22:54:15 +01:00
Nolan Woods
002d323c94 lib/http: Move HTTP object serialization logic to lib/http 2021-04-28 22:54:15 +01:00
Nolan Woods
4ad62ec016 lib/http: Add authentication middleware with basic auth implementation 2021-04-28 22:54:15 +01:00
Nolan Woods
95ee14bb2c feat: Add lib/http
lib/http provides an abstraction for a central http server that services can bind routes to
2021-04-28 22:54:15 +01:00
Romeo Kienzler
88aabd1f71 docs: corrected spelling
from "Check the integrity of an encrypted remote." to "Check the integrity of a crypted remote."
2021-04-28 22:50:55 +01:00
Nick Craig-Wood
34627c5c7e librclone: update docs for merge #4891 2021-04-28 20:42:00 +01:00
Nick Craig-Wood
e33303df94 librclone: add basic Python bindings with tests #4891 2021-04-28 16:55:08 +01:00
Nick Craig-Wood
665eceaec3 librclone: catch panics at the language change boundary #4891 2021-04-28 16:55:08 +01:00
Nick Craig-Wood
ba09ee18bb librclone: factor into gomobile and internal implementation #4891
This was needed because gomobile can't use a main package whereas this
is required to make a normal shared C library.
2021-04-28 16:55:08 +01:00
Nick Craig-Wood
62bf63d36f librclone: add tests for build and execute them in the actions #4891 2021-04-28 16:55:08 +01:00
Nick Craig-Wood
f38c262471 librclone: change interface for C code and add Mobile interface #4891
This changes the interface for the C code to return a struct on the
stack that is defined in the code rather than one which is defined by
the cgo compiler. This is more future proof and inline with the
gomobile interface.

This also adds a gomobile interface RcloneMobileRPC which uses generic
go types conforming to the gobind restrictions.

It also fixes up initialisation errors.
2021-04-28 16:55:08 +01:00
Nick Craig-Wood
5db88fed2b librclone: exports, errors, docs and examples #4891
- rename C exports to be namespaced with Rclone prefix
- fix error handling in RcloneRPC
- add more examples
- add more docs
- add README
- simplify ctest Makefile
2021-04-28 16:55:08 +01:00
lewisxy
316e65589b librclone: export the rclone RC as a C library #4891 2021-04-28 16:55:08 +01:00
Tatsuya Noyori
4401d180aa s3: add --s3-no-head-object
This stops rclone doing any HEAD requests on objects.
2021-04-28 11:05:54 +01:00
Nick Craig-Wood
9ccd870267 Move the how to use GitHub info in the bug/issue templates to the end
This is so that we see the text of the bug/issue first rather than the
how to use GitHub info, which is very useful when posting bug reports
to the forum or social media.
2021-04-28 09:40:19 +01:00
Nick Craig-Wood
16d1da2c1e vfs: remove item.metaDirty as it was confusing and not used
See discussion in #5277
2021-04-28 09:33:22 +01:00
Nick Craig-Wood
00a0ee1899 vfs: fix modtime changing when reading file into cache - fixes #5277
Before this change but after:

aea8776a43 vfs: fix modtimes not updating when writing via cache #4763

When a file was opened read-only the modtime was read from the cached
file. However this modtime wasn't correct leading to an incorrect
result.

This change fixes the definition of `item.IsDirty` to be true only
when the data is dirty. This fixes the problem as a read only file
isn't considered dirty.
2021-04-28 09:33:22 +01:00
Nick Craig-Wood
b78c9a65fa backends: remove log.Fatal and replace with error returns #5234
This changes the Config interface so that it returns an error.
2021-04-27 18:18:08 +01:00
Nick Craig-Wood
ef3c350686 box: return errors instead of calling log.Fatal with them #5234 2021-04-27 18:18:08 +01:00
Nick Craig-Wood
742af80972 Add jtagcat to contributors 2021-04-27 18:18:08 +01:00
albertony
08a2df51be Use decimal prefixes for counts
Fixes #5126
2021-04-27 02:25:52 +03:00
albertony
2925e1384c Use binary prefixes for size and rate units
Includes adding support for the additional size input suffixes Mi and MiB, treated as equivalent to M.
Extends binary suffix output with letter i, e.g. Ki and Mi.
Centralizes creation of bit/byte unit strings.
2021-04-27 02:25:52 +03:00
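
For reference, formatting with binary prefixes looks like this small sketch (not rclone's actual size formatting code):

```go
package sizefmt

import "fmt"

// formatBytes renders a byte count with binary prefixes, so 1048576
// becomes "1.000 MiB" rather than the decimal "1.049 MB".
func formatBytes(b int64) string {
	const unit = 1024
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := int64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.3f %ciB", float64(b)/float64(div), "KMGTPE"[exp])
}
```
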
albertony
2ec0c8d45f stats: correct spelling of data rate units 2021-04-27 02:25:52 +03:00
albertony
98579608ec docs: cleanup spelling of size and rate units 2021-04-27 02:25:52 +03:00
Caleb Case
a1a41aa0c1 backend/tardigrade: use negative offset
v1.4.6 of uplink allows us to do a negative offset from the end of the
file. This removes a round trip when requesting the last N bytes of a
file.

Previous to v1.4.6 of uplink it wasn't possible to do a negative offset
on download. This meant that to fulfill the semantics of http range
headers it was necessary to first fetch the size of the object via a
stat call and compute absolute offset and length.
2021-04-27 02:20:08 +03:00
albertony
f8d56bebaf config: delay load config file (#5258)
Restructuring of the config code in v1.55 resulted in the config
file being loaded early at process startup. If the configuration
file is encrypted this means the user will need to supply the password,
even when running commands that do not use the config.
This also led to an issue where mount with --daemon failed to
decrypt the config file when it had to prompt the user for a password.

Fixes #5236
Fixes #5228
2021-04-26 23:37:49 +02:00
jtagcat
5d799431a7 GitHub issue templates: Add GH Etiquette. 2021-04-26 18:12:37 +01:00
Leo Luan
8f23cae1c0 vfs: Add cache reset for --vfs-cache-max-size handling at cache poll interval
The vfs-cache-max-size parameter is probably confusing to many users.
The cache cleaner checks cache size periodically at the --vfs-cache-poll-interval
(default 60 seconds) interval and removes cache items in the following order.

(1) cache items that are not in use and with age > vfs-cache-max-age
(2) if the cache space used at this time still is larger than
vfs-cache-max-size, the cleaner continues to remove cache items that are
not in use.

The cache cleaning process does not remove cache items that are currently in use.
If the total space consumed by in-use cache items exceeds vfs-cache-max-size, the
periodic cache cleaner thread does not do anything further and leaves the in-use
cache items alone with a total space larger than vfs-cache-max-size.

A cache reset feature was introduced in 1.53 which resets in-use (but not dirty,
i.e., not being updated) cache items when additional cache data incurs an ENOSPC
error. But this code was not activated in the periodic cache cleaning thread.

This patch adds the cache reset step in the cache cleaner thread during cache
poll to reset cache items until the total size of the remaining cache items is
below vfs-cache-max-size.
2021-04-26 17:55:52 +01:00
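
A schematic sketch of that cleaning order; the item type and the in-place "removal" are simplifications for illustration, not the real VFS cache code:

```go
package vfscache

import "time"

// item is a much-simplified view of a cache entry for this sketch.
type item struct {
	size  int64
	age   time.Duration
	inUse bool
	dirty bool
}

// clean sketches the ordering described above. "Removing" an item is
// modelled here as zeroing its size; the real cache also deletes the
// data on disk and resets in-use items rather than removing them.
func clean(items []*item, maxAge time.Duration, maxSize int64) {
	var used int64
	for _, it := range items {
		used += it.size
	}
	// (1) drop unused items older than maxAge
	for _, it := range items {
		if !it.inUse && it.age > maxAge && it.size > 0 {
			used -= it.size
			it.size = 0
		}
	}
	// (2) drop further unused items while still over quota
	for _, it := range items {
		if used <= maxSize {
			break
		}
		if !it.inUse && it.size > 0 {
			used -= it.size
			it.size = 0
		}
	}
	// (3) reset in-use but not dirty items until under quota
	for _, it := range items {
		if used <= maxSize {
			break
		}
		if it.inUse && !it.dirty && it.size > 0 {
			used -= it.size
			it.size = 0
		}
	}
}
```
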
Mathieu Carbou
964088affa build: Only run event-based workflow scripts under rclone repo with manual override
This updates the actions to run event-based workflow scripts only
under the rclone repository and not forks. It also adds the
ability to manually trigger a build from a branch in rclone repository
and forks.

Fixes #5272
2021-04-26 17:52:03 +01:00
1197 changed files with 126485 additions and 59547 deletions

@@ -9,7 +9,7 @@ We understand you are having a problem with rclone; we want to help you with tha
 **STOP and READ**
 **YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
-Please show the effort you've put in to solving the problem and please be specific.
+Please show the effort you've put into solving the problem and please be specific.
 People are volunteering their time to help! Low effort posts are not likely to get good answers!
 If you think you might have found a bug, try to replicate it with the latest beta (or stable).
@@ -37,7 +37,6 @@ The Rclone Developers
 -->
 #### The associated forum post URL from `https://forum.rclone.org`
@@ -65,3 +64,11 @@ The Rclone Developers
 #### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
 <!--- Please keep the note below for others who read your bug report. -->
+#### How to use GitHub
+* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
+* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
+* Subscribe to receive notifications on status change and new comments.

@@ -26,7 +26,6 @@ The Rclone Developers
 -->
 #### The associated forum post URL from `https://forum.rclone.org`
@@ -42,3 +41,11 @@ The Rclone Developers
 #### How do you think rclone should be changed to solve that?
 <!--- Please keep the note below for others who read your feature request. -->
+#### How to use GitHub
+* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
+* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
+* Subscribe to receive notifications on status change and new comments.

View File

@@ -22,7 +22,7 @@ Link issues and relevant forum posts here.
#### Checklist
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-new-feature-or-bug-fix).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).

View File

@@ -12,29 +12,36 @@ on:
tags:
- '*'
pull_request:
workflow_dispatch:
inputs:
manual:
required: true
default: true
jobs:
build:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.15', 'go1.16']
include:
- job_name: linux
os: ubuntu-latest
go: '1.16.x'
go: '1.17.x'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
racequicktest: true
librclonetest: true
deploy: true
- job_name: mac_amd64
os: macOS-latest
go: '1.16.x'
os: macos-11
go: '1.17.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -42,15 +49,15 @@ jobs:
deploy: true
- job_name: mac_arm64
os: macOS-latest
go: '1.16.x'
os: macos-11
go: '1.17.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows_amd64
os: windows-latest
go: '1.16.x'
go: '1.17.x'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'
@@ -60,7 +67,7 @@ jobs:
- job_name: windows_386
os: windows-latest
go: '1.16.x'
go: '1.17.x'
gotags: cmount
goarch: '386'
cgo: '1'
@@ -71,28 +78,23 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.16.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
go: '1.17.x'
build_flags: '-exclude "^(windows/(386|amd64)|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.13
os: ubuntu-latest
go: '1.13.x'
quicktest: true
- job_name: go1.14
os: ubuntu-latest
go: '1.14.x'
quicktest: true
racequicktest: true
- job_name: go1.15
os: ubuntu-latest
go: '1.15.x'
quicktest: true
racequicktest: true
- job_name: go1.16
os: ubuntu-latest
go: '1.16.x'
quicktest: true
racequicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
@@ -108,6 +110,7 @@ jobs:
with:
stable: 'false'
go-version: ${{ matrix.go }}
check-latest: true
- name: Set environment variables
shell: bash
@@ -132,7 +135,7 @@ jobs:
run: |
brew update
brew install --cask macfuse
if: matrix.os == 'macOS-latest'
if: matrix.os == 'macos-11'
- name: Install Libraries on Windows
shell: powershell
@@ -187,12 +190,13 @@ jobs:
make racequicktest
if: matrix.racequicktest
- name: Code quality test
- name: Run librclone tests
shell: bash
run: |
make build_dep
make check
if: matrix.check
make -C librclone/ctest test
make -C librclone/ctest clean
librclone/python/test_rclone.py
if: matrix.librclonetest
- name: Compile all architectures test
shell: bash
@@ -213,100 +217,126 @@ jobs:
# Deploy binaries if enabled in config && not a PR && not a fork
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Code quality test
uses: golangci/golangci-lint-action@v2
with:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
android:
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
# Upgrade together with NDK version
- name: Set up Go 1.14
uses: actions/setup-go@v1
with:
go-version: 1.14
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
# Upgrade together with NDK version
- name: Set up Go 1.16
uses: actions/setup-go@v1
with:
go-version: 1.16
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
- name: Set global environment variables
shell: bash
run: |
echo "VERSION=$(make version)" >> $GITHUB_ENV
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: build native rclone
run: |
make
- name: Set global environment variables
shell: bash
run: |
echo "VERSION=$(make version)" >> $GITHUB_ENV
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
- name: build native rclone
run: |
make
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: install gomobile
run: |
go get golang.org/x/mobile/cmd/gobind
go get golang.org/x/mobile/cmd/gomobile
env PATH=$PATH:~/go/bin gomobile init
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
- name: arm-v7a gomobile build
run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: Upload artifacts
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
- name: Upload artifacts
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'

View File

@@ -7,6 +7,7 @@ on:
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:

View File

@@ -6,6 +6,7 @@ on:
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
@@ -31,3 +32,28 @@ jobs:
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
build_docker_volume_plugin:
if: github.repository == 'rclone/rclone'
needs: build
runs-on: ubuntu-latest
name: Build docker plugin job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish docker plugin
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

4
.gitignore vendored
View File

@@ -11,3 +11,7 @@ rclone.iml
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
Thumbs.db
__pycache__

View File

@@ -5,7 +5,7 @@ linters:
- deadcode
- errcheck
- goimports
- golint
- revive
- ineffassign
- structcheck
- varcheck
@@ -24,3 +24,7 @@ issues:
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m

View File

@@ -12,95 +12,164 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):
* Rclone version (e.g. output from `rclone -V`)
* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
* Rclone version (e.g. output from `rclone version`)
* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
* if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a pull request ##
## Submitting a new feature or bug fix ##
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.
If it is a big feature then make an issue first so it can be discussed.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.
First in your web browser press the fork button on [rclone's GitHub
To prepare your pull request first press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).
Now in your terminal
Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
git clone https://github.com/rclone/rclone.git
cd rclone
git remote rename origin upstream
# if you have SSH keys setup in your GitHub account:
git remote add origin git@github.com:YOURUSER/rclone.git
go build
# otherwise:
git remote add origin https://github.com/YOURUSER/rclone.git
Make a branch to add your new feature
Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
Now [install Go](https://golang.org/doc/install) and verify your installation:
go version
Great, you can now compile and execute your own version of rclone:
go build
./rclone version
(Note that you can also replace `go build` with `make`, which will include a
more accurate version number in the executable as well as enable you to specify
more build options.) Finally make a branch to add your new feature
git checkout -b my-new-feature
And get hacking.
When ready - run the unit tests for the code you changed
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
When ready - test the affected functionality and run the unit tests for the code you changed
cd folder/with/changed/files
go test -v
Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests.
Note the top level Makefile targets
* make check
* make test
Both of these will be run by Travis when you make a pull request but
you can do this yourself locally too. These require some extra go
packages which you can install with
* make build_dep
This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
Make sure you
* Add [unit tests](#testing) for a new feature.
* Add [documentation](#writing-documentation) for a new feature.
* Follow the [commit message guidelines](#commit-messages).
* Add [unit tests](#testing) for a new feature
* squash commits down to one per feature
* rebase to master with `git rebase master`
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
When you are done with that
When you are done with that push your changes to Github:
git push -u origin my-new-feature
Go to the GitHub website and click [Create pull
and open the GitHub website to [create your pull
request](https://help.github.com/articles/creating-a-pull-request/).
You patch will get reviewed and you might get asked to fix some stuff.
Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
```
git log # See how many commits you want to squash
git reset --soft HEAD~2 # This squashes the 2 latest commits together.
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
git commit # Add a new commit message.
git push --force # Push the squashed commit to your GitHub repo.
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
```
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
## CI for your fork ##
## Using Git and Github ##
### Committing your changes ###
Follow the guideline for [commit messages](#commit-messages) and then:
git checkout my-new-feature # To switch to your branch
git status # To see the new and changed files
git add FILENAME # To select FILENAME for the commit
git status # To verify the changes to be committed
git commit # To do the commit
git log # To verify the commit. Use q to quit the log
You can modify the message or changes in the latest commit using:
git commit --amend
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Replacing your previously pushed commits ###
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
Your previously pushed commits are replaced by:
git push --force origin my-new-feature
### Basing your changes on the latest master ###
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
git checkout master
git fetch upstream
git merge --ff-only
git push origin --follow-tags # optional update of your fork in GitHub
git checkout my-new-feature
git rebase master
If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Squashing your commits ###
To combine your commits into one commit:
git log # To count the commits to squash, e.g. the last 2
git reset --soft HEAD~2 # To undo the 2 latest commits
git status # To check everything is as expected
If everything is fine, then make the new combined commit:
git commit # To commit the undone commits as one
otherwise, you may roll back using:
git reflog # To check that HEAD{1} is your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
### GitHub Continuous Integration ###
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
## Testing ##
### Quick testing ###
rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
go test -v ./...
You can also use `make`, if supported by your platform
make quicktest
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
### Backend testing ###
rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can
@@ -134,12 +203,19 @@ project root:
go install github.com/rclone/rclone/fstest/test_all
test_all -backend drive
### Full integration testing ###
If you want to run all the integration tests against all the remotes,
then change into the project root and run
make check
make test
This command is run daily on the integration test server. You can
The commands may require some extra go packages which you can install with
make build_dep
The full integration tests are run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/
## Code Organisation ##
@@ -147,16 +223,17 @@ find the results at https://pub.rclone.org/integration-tests/
Rclone code is organised into a small number of top level directories
with modules beneath.
* backend - the rclone backends for interfacing to cloud providers -
* backend - the rclone backends for interfacing to cloud providers -
* all - import this to load all the cloud providers
* ...providers
* bin - scripts for use while building or maintaining rclone
* cmd - the rclone commands
* all - import this to load all the commands
* ...commands
* cmdtest - end-to-end tests of commands, flags, environment variables,...
* docs - the documentation and website
* content - adjust these docs only - everything else is autogenerated
* command - these are auto generated - edit the corresponding .go file
* command - these are auto-generated - edit the corresponding .go file
* fs - main rclone definitions - minimal amount of code
* accounting - bandwidth limiting and statistics
* asyncreader - an io.Reader which reads ahead
@@ -198,18 +275,39 @@ If you add a new general flag (not for a backend), then document it in
alphabetical order.
If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field. The first line of this is used
for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.
the source file in the `Help:` field.
* Start with the most important information about the option,
as a single sentence on a single line.
* This text will be used for the command-line flag help.
* It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence.
* It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help.
* Try to keep it below 80 characters, to reduce text wrapping in the terminal.
* More details can be added in a new paragraph, after an empty line (`"\n\n"`).
* Like with docs generated from Markdown, a single line break is ignored
and two line breaks creates a new paragraph.
* This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release).
* To create options of enumeration type use the `Examples:` field.
* Each example value have their own `Help:` field, but they are treated
a bit different than the main option help text. They will be shown
as an unordered list, therefore a single line break is enough to
create a new list item. Also, for enumeration texts like name of
countries, it looks better without an ending period/full stop character.
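For illustration, a backend option following the conventions above might look like the following sketch (hypothetical option, not taken from a real backend):

```go
package example

import "github.com/rclone/rclone/fs"

// exampleOptions is a sketch of the Help: and Examples: conventions above.
var exampleOptions = []fs.Option{{
	Name: "upload_cutoff",
	Help: `Cutoff for switching to chunked upload.

Files larger than this are uploaded in chunks. More detail can go in
further paragraphs like this one.`,
	Default:  fs.SizeSuffix(200 * 1024 * 1024),
	Advanced: true,
}, {
	Name:    "region",
	Help:    "Region to connect to.",
	Default: "eu",
	Examples: []fs.OptionExample{{
		Value: "eu",
		Help:  "Europe",
	}, {
		Value: "us",
		Help:  "United States",
	}},
}}
```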
The only documentation you need to edit are the `docs/content/*.md`
files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
Documentation for rclone sub commands is with their code, e.g.
`cmd/ls/ls.go`.
`cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
line, without a period/full stop character at the end, as it will be
combined unmodified with other information (such as any default value).
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
@@ -252,7 +350,7 @@ And here is an example of a longer one:
```
mount: fix hang on errored upload
In certain circumstances if an upload failed then the mount could hang
In certain circumstances, if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed. This will cause the write side to return a pipe closed
error fixing the hang.
@@ -284,7 +382,7 @@ and `go.sum` in the same commit as your other changes.
If you need to update a dependency then run
GO111MODULE=on go get -u github.com/pkg/errors
GO111MODULE=on go get -u golang.org/x/crypto
Check in a single commit as above.
@@ -327,8 +425,8 @@ Research
Getting going
* Create `backend/remote/remote.go` (copy this from a similar remote)
* box is a good one to start from if you have a directory based remote
* b2 is a good one to start from if you have a bucket based remote
* box is a good one to start from if you have a directory-based remote
* b2 is a good one to start from if you have a bucket-based remote
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.

View File

@@ -15,11 +15,11 @@ Current active maintainers of rclone are:
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | tardigrade backend |
| Caleb Case | @calebcase | storj backend |
**This is a work in progress Draft**
This is a guide for how to be an rclone maintainer. This is mostly a writeup of what I (@ncw) attempt to do.
This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.
## Triaging Tickets ##
@@ -27,15 +27,15 @@ When a ticket comes in it should be triaged. This means it should be classified
Rclone uses the labels like this:
* `bug` - a definite verified bug
* `bug` - a definitely verified bug
* `can't reproduce` - a problem which we can't reproduce
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with `rclone mount` command
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
* `help` wanted - mark these if you find a self-contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation, etc.
* `Needs Go 1.XX` - waiting for that version of Go to be released
@@ -51,7 +51,7 @@ The milestones have these meanings:
* v1.XX - stuff we would like to fit into this release
* v1.XX+1 - stuff we are leaving until the next release
* Soon - stuff we think is a good idea - waiting to be scheduled to a release
* Soon - stuff we think is a good idea - waiting to be scheduled for a release
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
@@ -65,7 +65,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po
Try to process pull requests promptly!
Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
@@ -81,15 +81,15 @@ Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
High impact regressions should be fixed before the next release.
Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.
Near the start of the release cycle, the dependencies should be updated with `make update` to give time for bugs to surface.
Towards the end of the release cycle try not to merge anything too big so let things settle down.
Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time-consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
## Mailing list ##
There is now an invite only mailing list for rclone developers `rclone-dev` on google groups.
There is now an invite-only mailing list for rclone developers `rclone-dev` on google groups.
## TODO ##

11653
MANUAL.html generated

File diff suppressed because it is too large Load Diff

14914
MANUAL.md generated

File diff suppressed because it is too large Load Diff

15639
MANUAL.txt generated

File diff suppressed because it is too large Load Diff

View File

@@ -104,10 +104,14 @@ showupdates:
@echo "*** Direct dependencies that could be updated ***"
@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
# Update direct dependencies only
updatedirect:
GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
GO111MODULE=on go mod tidy
# Update direct and indirect dependencies and test dependencies
update:
GO111MODULE=on go get -u -t ./...
-#GO111MODULE=on go get -d $(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
GO111MODULE=on go get -d -u -t ./...
GO111MODULE=on go mod tidy
# Tidy the module dependencies
@@ -256,3 +260,33 @@ startstable:
winzip:
zip -9 rclone-$(TAG).zip rclone.exe
# docker volume plugin
PLUGIN_USER ?= rclone
PLUGIN_TAG ?= latest
PLUGIN_BASE_TAG ?= latest
PLUGIN_ARCH ?= amd64
PLUGIN_IMAGE := $(PLUGIN_USER)/docker-volume-rclone:$(PLUGIN_TAG)
PLUGIN_BASE := $(PLUGIN_USER)/rclone:$(PLUGIN_BASE_TAG)
PLUGIN_BUILD_DIR := ./build/docker-plugin
PLUGIN_CONTRIB_DIR := ./contrib/docker-plugin/managed
docker-plugin-create:
docker buildx inspect |grep -q /${PLUGIN_ARCH} || \
docker run --rm --privileged tonistiigi/binfmt --install all
rm -rf ${PLUGIN_BUILD_DIR}
docker buildx build \
--no-cache --pull \
--build-arg BASE_IMAGE=${PLUGIN_BASE} \
--platform linux/${PLUGIN_ARCH} \
--output ${PLUGIN_BUILD_DIR}/rootfs \
${PLUGIN_CONTRIB_DIR}
cp ${PLUGIN_CONTRIB_DIR}/config.json ${PLUGIN_BUILD_DIR}
docker plugin rm --force ${PLUGIN_IMAGE} 2>/dev/null || true
docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}
docker-plugin-push:
docker plugin push ${PLUGIN_IMAGE}
docker plugin rm ${PLUGIN_IMAGE}
docker-plugin: docker-plugin-create docker-plugin-push

View File

@@ -1,8 +1,9 @@
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Download](https://rclone.org/downloads/) |
[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
@@ -10,16 +11,17 @@
[![Build Status](https://github.com/rclone/rclone/workflows/build/badge.svg)](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)
# Rclone
Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
Rclone *("rsync for cloud storage")* is a command-line program to sync files and directories to and from different cloud storage providers.
## Storage providers
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
@@ -28,11 +30,11 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
@@ -60,19 +62,21 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
## Features
@@ -87,7 +91,6 @@ Please see [the full list of all storage providers and their features](https://r
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna

View File

@@ -34,13 +34,24 @@ This file describes how to make the various kinds of releases
* make startdev # make startstable for stable branch
* # announce with forum post, twitter post, patreon post
## Update dependencies
Early in the next release cycle update the dependencies
* Review any pinned packages in go.mod and remove if possible
* make update
* git status
* git add new files
* make updatedirect
* make
* git commit -a -v
* make update
* make
* roll back any updates which didn't compile
* git commit -a -v --amend
Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
## Making a point release

View File

@@ -1 +1 @@
v1.56.0
v1.58.0

View File

@@ -20,7 +20,7 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Help: "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Required: true,
}},
}

View File

@@ -20,7 +20,7 @@ var (
)
func prepare(t *testing.T, root string) {
configfile.LoadConfig(context.Background())
configfile.Install()
// Configure the remote
config.FileSet(remoteName, "type", "alias")

View File

@@ -18,6 +18,7 @@ import (
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hasher"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
@@ -27,6 +28,7 @@ import (
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/memory"
_ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/pcloud"
@@ -37,9 +39,10 @@ import (
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"

View File

@@ -14,16 +14,15 @@ we ignore assets completely!
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/http"
"path"
"strings"
"time"
acd "github.com/ncw/go-acd"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -70,11 +69,10 @@ func init() {
Prefix: "acd",
Description: "Amazon Drive",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: acdConfig,
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "checkpoint",
@@ -83,16 +81,16 @@ func init() {
Advanced: true,
}, {
Name: "upload_wait_per_gb",
Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
Help: `Additional time per GiB to wait after a failed complete upload to see if it appears.
Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while. This
happens sometimes for files over 1GB in size and nearly every time for
files bigger than 10GB. This parameter controls the time rclone waits
happens sometimes for files over 1 GiB in size and nearly every time for
files bigger than 10 GiB. This parameter controls the time rclone waits
for the file to appear.
The default value for this parameter is 3 minutes per GB, so by
default it will wait 3 minutes for every GB uploaded to see if the
The default value for this parameter is 3 minutes per GiB, so by
default it will wait 3 minutes for every GiB uploaded to see if the
file appears.
You can disable this feature by setting it to 0. This may cause
@@ -112,7 +110,7 @@ in this situation.`,
Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
of files bigger than about 10GB. The default for this is 9GB which
of files bigger than about 10 GiB. The default for this is 9 GiB which
shouldn't need to be changed.
To download files above this threshold, rclone requests a "tempLink"
@@ -261,7 +259,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
return nil, fmt.Errorf("failed to configure Amazon Drive: %w", err)
}
c := acd.NewClient(oAuthClient)
@@ -294,13 +292,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get endpoints")
return nil, fmt.Errorf("failed to get endpoints: %w", err)
}
// Get rootID
rootInfo, err := f.getRootInfo(ctx)
if err != nil || rootInfo.Id == nil {
return nil, errors.Wrap(err, "failed to get root")
return nil, fmt.Errorf("failed to get root: %w", err)
}
f.trueRootID = *rootInfo.Id

View File

@@ -1,5 +1,6 @@
// Test AmazonCloudDrive filesystem interface
//go:build acd
// +build acd
package amazonclouddrive_test

View File

@@ -1,6 +1,7 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
// +build !plan9,!solaris,!js,go1.14
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
@@ -9,12 +10,14 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"time"
@@ -22,7 +25,6 @@ import (
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -47,9 +49,7 @@ const (
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
storageDefaultBaseURL = "blob.core.windows.net"
defaultChunkSize = 4 * fs.MebiByte
maxChunkSize = 100 * fs.MebiByte
uploadConcurrency = 4
defaultChunkSize = 4 * fs.Mebi
defaultAccessTier = azblob.AccessTierNone
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
// Default storage account, key and blob endpoint for emulator support,
@@ -73,30 +73,29 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
Help: "Storage Account Name.\n\nLeave blank to use SAS URL or Emulator.",
}, {
Name: "service_principal_file",
Help: `Path to file containing credentials for use with a service principal.
Leave blank normally. Needed only if you want to use a service principal instead of interactive login.
$ az sp create-for-rbac --name "<name>" \
$ az ad sp create-for-rbac --name "<name>" \
--role "Storage Blob Data Owner" \
--scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
> azure-principal.json
See [Use Azure CLI to assign an Azure role for access to blob and queue data](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli)
for more details.
See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.
`,
}, {
Name: "key",
Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
Help: "Storage Account Key.\n\nLeave blank to use SAS URL or Emulator.",
}, {
Name: "sas_url",
Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
Help: "SAS URL for container level access only.\n\nLeave blank if using account/key or Emulator.",
}, {
Name: "use_msi",
Help: `Use a managed service identity to authenticate (only works in Azure)
Help: `Use a managed service identity to authenticate (only works in Azure).
When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
to authenticate to Azure Storage instead of a SAS token or account key.
@@ -109,36 +108,57 @@ msi_client_id, or msi_mi_res_id parameters.`,
Default: false,
}, {
Name: "msi_object_id",
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
Advanced: true,
}, {
Name: "msi_client_id",
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
Advanced: true,
}, {
Name: "msi_mi_res_id",
Help: "Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.",
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true,
}, {
Name: "use_emulator",
Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
Default: false,
}, {
Name: "endpoint",
Help: "Endpoint for the service\nLeave blank normally.",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true,
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to chunked upload (<= 256MB). (Deprecated)",
Help: "Cutoff for switching to chunked upload (<= 256 MiB) (deprecated).",
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size (<= 100MB).
Help: `Upload chunk size.
Note that this is stored in memory and there may be up to
"--transfers" chunks stored at once in memory.`,
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large files over high-speed
links and these uploads do not fully utilize your bandwidth, then
increasing this may help to speed up the transfers.
In tests, upload speed increases almost linearly with upload
concurrency. For example to fill a gigabit pipe it may be necessary to
raise this to 64. Note that this will use more memory.
Note that chunks are stored in memory and there may be up to
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.`,
Default: 16,
Advanced: true,
}, {
Name: "list_chunk",
Help: `Size of blob list.
@@ -200,6 +220,7 @@ to start uploading.`,
Default: memoryPoolFlushTime,
Advanced: true,
Help: `How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
@@ -219,12 +240,12 @@ This option controls how often unused buffers will be removed from the pool.`,
encoder.EncodeRightPeriod),
}, {
Name: "public_access",
Help: "Public access level of a container: blob, container.",
Help: "Public access level of a container: blob or container.",
Default: string(azblob.PublicAccessNone),
Examples: []fs.OptionExample{
{
Value: string(azblob.PublicAccessNone),
Help: "The container and its blobs can be accessed only with an authorized request. It's a default value",
Help: "The container and its blobs can be accessed only with an authorized request.\nIt's a default value.",
}, {
Value: string(azblob.PublicAccessBlob),
Help: "Blob data within this container can be read via anonymous request.",
@@ -234,6 +255,11 @@ This option controls how often unused buffers will be removed from the pool.`,
},
},
Advanced: true,
}, {
Name: "no_head_object",
Help: `If set, do not do HEAD before GET when getting objects.`,
Default: false,
Advanced: true,
}},
})
}
@@ -250,6 +276,7 @@ type Options struct {
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
ArchiveTierDelete bool `config:"archive_tier_delete"`
@@ -259,6 +286,7 @@ type Options struct {
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
PublicAccess string `config:"public_access"`
NoHeadObject bool `config:"no_head_object"`
}
// Fs represents a remote azure server
@@ -404,12 +432,9 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.Byte
const minChunkSize = fs.SizeSuffixBase
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -451,11 +476,11 @@ const azureStorageEndpoint = "https://storage.azure.com/"
func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []byte) (azblob.TokenRefresher, error) {
var spCredentials servicePrincipalCredentials
if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
return nil, errors.Wrap(err, "error parsing credentials from JSON file")
return nil, fmt.Errorf("error parsing credentials from JSON file: %w", err)
}
oauthConfig, err := adal.NewOAuthConfig(azureActiveDirectoryEndpoint, spCredentials.Tenant)
if err != nil {
return nil, errors.Wrap(err, "error creating oauth config")
return nil, fmt.Errorf("error creating oauth config: %w", err)
}
// Create service principal token for Azure Storage.
@@ -465,7 +490,7 @@ func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []by
spCredentials.Password,
azureStorageEndpoint)
if err != nil {
return nil, errors.Wrap(err, "error creating service principal token")
return nil, fmt.Errorf("error creating service principal token: %w", err)
}
// Wrap token inside a refresher closure.
@@ -518,10 +543,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "azure: chunk size")
return nil, fmt.Errorf("azure: chunk size: %w", err)
}
if opt.ListChunkSize > maxListChunkSize {
return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
return nil, fmt.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
}
if opt.Endpoint == "" {
opt.Endpoint = storageDefaultBaseURL
@@ -530,12 +555,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.AccessTier == "" {
opt.AccessTier = string(defaultAccessTier)
} else if !validateAccessTier(opt.AccessTier) {
return nil, errors.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
return nil, fmt.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}
if !validatePublicAccess((opt.PublicAccess)) {
return nil, errors.Errorf("Azure Blob: Supported public access level are %s and %s",
return nil, fmt.Errorf("Azure Blob: Supported public access level are %s and %s",
string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
}
@@ -577,11 +602,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case opt.UseEmulator:
credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
if err != nil {
return nil, errors.Wrapf(err, "Failed to parse credentials")
return nil, fmt.Errorf("Failed to parse credentials: %w", err)
}
u, err = url.Parse(emulatorBlobEndpoint)
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
@@ -623,12 +648,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
})
if err != nil {
return nil, errors.Wrapf(err, "Failed to acquire MSI token")
return nil, fmt.Errorf("Failed to acquire MSI token: %w", err)
}
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
}
credential := azblob.NewTokenCredential(token.AccessToken, func(credential azblob.TokenCredential) time.Duration {
fs.Debugf(f, "Token refresher called.")
@@ -658,19 +683,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case opt.Account != "" && opt.Key != "":
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
if err != nil {
return nil, errors.Wrapf(err, "Failed to parse credentials")
return nil, fmt.Errorf("Failed to parse credentials: %w", err)
}
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
case opt.SASURL != "":
u, err = url.Parse(opt.SASURL)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse SAS URL")
return nil, fmt.Errorf("failed to parse SAS URL: %w", err)
}
// use anonymous credentials in case of sas url
pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
@@ -690,17 +715,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Create a standard URL.
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
}
// Try loading service principal credentials from file.
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service principal credentials file")
return nil, fmt.Errorf("error opening service principal credentials file: %w", err)
}
// Create a token refresher from service principal credentials.
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, loadedCreds)
if err != nil {
return nil, errors.Wrap(err, "failed to create a service principal token")
return nil, fmt.Errorf("failed to create a service principal token: %w", err)
}
options := azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}
pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options)
@@ -757,7 +782,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItemInternal) (fs
if err != nil {
return nil, err
}
} else {
} else if !o.fs.opt.NoHeadObject {
err := o.readMetaData() // reads info and headers, returning an error
if err != nil {
return nil, err
@@ -1316,7 +1341,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
}
data, err := base64.StdEncoding.DecodeString(o.md5)
if err != nil {
return "", errors.Wrapf(err, "Failed to decode Content-MD5: %q", o.md5)
return "", fmt.Errorf("Failed to decode Content-MD5: %q: %w", o.md5, err)
}
return hex.EncodeToString(data), nil
}
@@ -1367,6 +1392,39 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
return nil
}
func (o *Object) decodeMetaDataFromDownloadResponse(info *azblob.DownloadResponse) (err error) {
metadata := info.NewMetadata()
size := info.ContentLength()
if isDirectoryMarker(size, metadata, o.remote) {
return fs.ErrorNotAFile
}
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
// this as base64 encoded string.
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
o.mimeType = info.ContentType()
o.size = size
o.modTime = info.LastModified()
o.accessTier = o.AccessTier()
o.setMetadata(metadata)
// If it was a Range request, the size is wrong, so correct it
if contentRange := info.ContentRange(); contentRange != "" {
slash := strings.IndexRune(contentRange, '/')
if slash >= 0 {
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
if err == nil {
o.size = i
} else {
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
}
} else {
fs.Debugf(o, "Failed to find length in %q", contentRange)
}
}
return nil
}
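The Content-Range handling above recovers the full blob size when the download was a ranged GET: the header has the form "bytes 0-1023/4096" and the total length sits after the '/'. A tiny standalone sketch of that parse (standard library only, not part of the change itself):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	contentRange := "bytes 0-1023/4096" // what a ranged GET reports
	if slash := strings.IndexRune(contentRange, '/'); slash >= 0 {
		if total, err := strconv.ParseInt(contentRange[slash+1:], 10, 64); err == nil {
			fmt.Println(total) // 4096 - the size of the whole blob, not just the range returned
		}
	}
}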
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItemInternal) (err error) {
metadata := info.Metadata
size := *info.Properties.ContentLength
@@ -1403,6 +1461,10 @@ func (o *Object) clearMetaData() {
// o.size
// o.md5
func (o *Object) readMetaData() (err error) {
container, _ := o.split()
if !o.fs.containerOK(container) {
return fs.ErrorObjectNotFound
}
if !o.modTime.IsZero() {
return nil
}
@@ -1469,7 +1531,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var offset int64
var count int64
if o.AccessTier() == azblob.AccessTierArchive {
return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
return nil, fmt.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
}
fs.FixRangeOption(options, o.size)
for _, option := range options {
@@ -1495,7 +1557,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to open for download")
return nil, fmt.Errorf("failed to open for download: %w", err)
}
err = o.decodeMetaDataFromDownloadResponse(downloadResponse)
if err != nil {
return nil, fmt.Errorf("failed to decode metadata for download: %w", err)
}
in = downloadResponse.Body(azblob.RetryReaderOptions{})
return in, nil
@@ -1585,13 +1651,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "deleting archive tier blob before updating")
err = o.Remove(ctx)
if err != nil {
return errors.Wrap(err, "failed to delete archive blob before updating")
return fmt.Errorf("failed to delete archive blob before updating: %w", err)
}
} else {
return errCantUpdateArchiveTierBlobs
}
}
container, _ := o.split()
container, containerPath := o.split()
if container == "" || containerPath == "" {
return fmt.Errorf("can't upload to root - need a container")
}
err = o.fs.makeContainer(ctx, container)
if err != nil {
return err
@@ -1622,10 +1691,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(o.fs.opt.ChunkSize),
MaxBuffers: uploadConcurrency,
MaxBuffers: o.fs.opt.UploadConcurrency,
Metadata: o.meta,
BlobHTTPHeaders: httpHeaders,
TransferManager: o.fs.newPoolWrapper(uploadConcurrency),
TransferManager: o.fs.newPoolWrapper(o.fs.opt.UploadConcurrency),
}
// Don't retry, return a retry error instead
@@ -1678,7 +1747,7 @@ func (o *Object) AccessTier() azblob.AccessTierType {
// SetTier performs changing object tier
func (o *Object) SetTier(tier string) error {
if !validateAccessTier(tier) {
return errors.Errorf("Tier %s not supported by Azure Blob Storage", tier)
return fmt.Errorf("Tier %s not supported by Azure Blob Storage", tier)
}
// Check if current tier already matches with desired tier
@@ -1694,7 +1763,7 @@ func (o *Object) SetTier(tier string) error {
})
if err != nil {
return errors.Wrap(err, "Failed to set Blob Tier")
return fmt.Errorf("Failed to set Blob Tier: %w", err)
}
// Set access tier on local object also, this typically
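Nearly every hunk in this compare makes the same mechanical change: error wrapping moves from github.com/pkg/errors to the standard library's fmt.Errorf with the %w verb. A minimal sketch of the pattern (hypothetical helper, standard library only); errors wrapped with %w still unwrap with errors.Is and errors.As, so callers keep working:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// openConfig is a hypothetical helper showing the new wrapping style.
// Before: return nil, errors.Wrap(err, "failed to open config")
// After:  return nil, fmt.Errorf("failed to open config: %w", err)
func openConfig(path string) (*os.File, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("failed to open config %q: %w", path, err)
	}
	return f, nil
}

func main() {
	_, err := openConfig("/no/such/file")
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true - %w keeps the cause inspectable
}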


@@ -1,4 +1,5 @@
// +build !plan9,!solaris,!js,go1.14
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
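These build-tag hunks adopt the //go:build syntax introduced in Go 1.17; gofmt keeps a matching legacy // +build line for older toolchains, and the go1.14 clause is dropped now that the minimum supported Go version is past it. As an illustration of how the two syntaxes correspond (example constraint, not taken from this change):

//go:build (linux || darwin) && !cgo
// +build linux darwin
// +build !cgo

In the legacy form, space-separated terms on one line are ORed and separate // +build lines are ANDed, which is why the single boolean expression above needs two lines.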


@@ -1,6 +1,7 @@
// Test AzureBlob filesystem interface
// +build !plan9,!solaris,!js,go1.14
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
@@ -16,12 +17,10 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
},
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{},
})
}


@@ -1,6 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 solaris js !go1.14
//go:build plan9 || solaris || js
// +build plan9 solaris js
package azureblob


@@ -1,4 +1,5 @@
// +build !plan9,!solaris,!js,go1.14
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
@@ -12,7 +13,6 @@ import (
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
@@ -94,7 +94,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, errors.Wrap(err, "MSI is not enabled on this VM")
return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
@@ -119,7 +119,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, errors.Wrap(err, "Couldn't read IMDS response")
return result, fmt.Errorf("Couldn't read IMDS response: %w", err)
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
@@ -130,7 +130,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
return result, fmt.Errorf("Couldn't unmarshal IMDS response: %w", err)
}
return result, nil


@@ -1,4 +1,5 @@
// +build !plan9,!solaris,!js,go1.14
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob


@@ -9,6 +9,7 @@ import (
"bytes"
"context"
"crypto/sha1"
"errors"
"fmt"
gohash "hash"
"io"
@@ -19,7 +20,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -54,10 +54,10 @@ const (
decayConstant = 1 // bigger for slower decay, exponential
maxParts = 10000
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
minChunkSize = 5 * fs.MebiByte
defaultChunkSize = 96 * fs.MebiByte
defaultUploadCutoff = 200 * fs.MebiByte
largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
minChunkSize = 5 * fs.Mebi
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
@@ -75,15 +75,15 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID",
Help: "Account ID or Application Key ID.",
Required: true,
}, {
Name: "key",
Help: "Application Key",
Help: "Application Key.",
Required: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\nLeave blank normally.",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true,
}, {
Name: "test_mode",
@@ -103,7 +103,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Advanced: true,
}, {
Name: "versions",
Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: false,
Advanced: true,
}, {
@@ -116,32 +116,34 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Files above this size will be uploaded in chunks of "--b2-chunk-size".
This value should be set no larger than 4.657GiB (== 5GB).`,
This value should be set no larger than 4.657 GiB (== 5 GB).`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy
Help: `Cutoff for switching to multipart copy.
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 4.6GB.`,
The minimum is 0 and the maximum is 4.6 GiB.`,
Default: largeFileCopyCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size. Must fit in memory.
Help: `Upload chunk size.
When uploading large files, chunk the file into this size. Note that
these chunks are buffered in memory and there might a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
minimum size.`,
When uploading large files, chunk the file into this size.
Must fit in memory. These chunks are buffered in memory and there
might be a maximum of "--transfers" chunks in progress at once.
5,000,000 Bytes is the minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files
Help: `Disable checksums for large (> upload cutoff) files.
Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
@@ -158,7 +160,15 @@ free egress for data downloaded through the Cloudflare network.
Rclone works with private buckets by sending an "Authorization" header.
If the custom endpoint rewrites the requests for authentication,
e.g., in Cloudflare Workers, this header needs to be handled properly.
Leave blank if you want to use the endpoint provided by Backblaze.`,
Leave blank if you want to use the endpoint provided by Backblaze.
The URL provided here SHOULD have the protocol and SHOULD NOT have
a trailing slash or specify the /file/bucket subpath as rclone will
request files with "{download_url}/file/{bucket_name}/{path}".
Example:
> https://mysubdomain.mydomain.tld
(No trailing "/", "file" or "bucket")`,
Advanced: true,
}, {
Name: "download_auth_duration",
@@ -364,7 +374,7 @@ func errorHandler(resp *http.Response) error {
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -379,7 +389,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
if cs < opt.ChunkSize {
return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
return fmt.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
}
return nil
}
@@ -412,11 +422,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = checkUploadCutoff(opt, opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "b2: upload cutoff")
return nil, fmt.Errorf("b2: upload cutoff: %w", err)
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "b2: chunk size")
return nil, fmt.Errorf("b2: chunk size: %w", err)
}
if opt.Account == "" {
return nil, errors.New("account not found")
@@ -461,7 +471,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = f.authorizeAccount(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to authorize account")
return nil, fmt.Errorf("failed to authorize account: %w", err)
}
// If this is a key limited to a single bucket, it must exist already
if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
@@ -470,7 +480,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
if allowedBucket != f.rootBucket {
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
}
f.cache.MarkOK(f.rootBucket)
f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
@@ -510,7 +520,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
return f.shouldRetryNoReauth(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to authenticate")
return fmt.Errorf("failed to authenticate: %w", err)
}
f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
return nil
@@ -556,7 +566,7 @@ func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUp
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
return upload, nil
}
@@ -1046,7 +1056,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
}
}
}
return errors.Wrap(err, "failed to create bucket")
return fmt.Errorf("failed to create bucket: %w", err)
}
f.setBucketID(bucket, response.ID)
f.setBucketType(bucket, response.Type)
@@ -1081,7 +1091,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to delete bucket")
return fmt.Errorf("failed to delete bucket: %w", err)
}
f.clearBucketID(bucket)
f.clearBucketType(bucket)
@@ -1122,7 +1132,7 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
return nil
}
}
return errors.Wrapf(err, "failed to hide %q", bucketPath)
return fmt.Errorf("failed to hide %q: %w", bucketPath, err)
}
return nil
}
@@ -1143,7 +1153,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrapf(err, "failed to delete %q", Name)
return fmt.Errorf("failed to delete %q: %w", Name, err)
}
return nil
}
@@ -1362,7 +1372,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return "", errors.Wrap(err, "failed to get download authorization")
return "", fmt.Errorf("failed to get download authorization: %w", err)
}
return response.AuthorizationToken, nil
}
@@ -1667,14 +1677,14 @@ func (file *openFile) Close() (err error) {
// Check to see we read the correct number of bytes
if file.o.Size() != file.bytes {
return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
}
// Check the SHA1
receivedSHA1 := file.o.sha1
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
}
return nil
@@ -1714,7 +1724,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
return nil, nil, fs.ErrorObjectNotFound
}
return nil, nil, errors.Wrapf(err, "failed to %s for download", method)
return nil, nil, fmt.Errorf("failed to %s for download: %w", method, err)
}
// NB resp may be Open here - don't return err != nil without closing
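The corruption checks above compare a byte count and SHA1 accumulated while reading against what B2 reported for the object. A standalone sketch of that verify-while-streaming pattern (hypothetical names, standard library only):

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// verifyStream copies r to w while hashing, then checks length and SHA1.
func verifyStream(w io.Writer, r io.Reader, wantLen int64, wantSHA1 string) error {
	h := sha1.New()
	n, err := io.Copy(w, io.TeeReader(r, h))
	if err != nil {
		return fmt.Errorf("copy failed: %w", err)
	}
	if n != wantLen {
		return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", wantLen, n)
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != wantSHA1 {
		return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", wantSHA1, got)
	}
	return nil
}

func main() {
	// SHA1("hello") = aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d
	err := verifyStream(io.Discard, strings.NewReader("hello"), 5, "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d")
	fmt.Println(err) // <nil>
}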


@@ -15,7 +15,6 @@ import (
"strings"
"sync"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -102,7 +101,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
parts++
}
if parts > maxParts {
return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
return nil, fmt.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
sha1SliceSize = parts
}
@@ -185,7 +184,7 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
@@ -230,14 +229,14 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
//
// The number of bytes in the file being uploaded. Note that
// this header is required; you cannot leave it out and just
// use chunked encoding. The minimum size of every part but
// the last one is 100MB.
// use chunked encoding. The minimum size of every part but
// the last one is 100 MB (100,000,000 bytes)
//
// X-Bz-Content-Sha1
//
// The SHA1 checksum of the this part of the file. B2 will
// check this when the part is uploaded, to make sure that the
// data arrived correctly. The same SHA1 checksum must be
// data arrived correctly. The same SHA1 checksum must be
// passed to b2_finish_large_file.
opts := rest.Opts{
Method: "POST",
@@ -406,7 +405,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure


@@ -61,7 +61,7 @@ func (e *Error) Error() string {
var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
// Types of things in Item
const (
@@ -90,6 +90,12 @@ type Item struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
OwnedBy struct {
Type string `json:"type"`
ID string `json:"id"`
Name string `json:"name"`
Login string `json:"login"`
} `json:"owned_by"`
}
// ModTime returns the modification time of the item
@@ -103,10 +109,11 @@ func (i *Item) ModTime() (t time.Time) {
// FolderItems is returned from the GetFolderItems call
type FolderItems struct {
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
NextMarker *string `json:"next_marker,omitempty"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`
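The new NextMarker field exists because the listing code later in this compare switches Box directory listings from offset-based paging to marker-based paging (usemarker=true): each response hands back an opaque marker for the next page, and a nil marker means the listing is complete. A generic sketch of that loop shape (hypothetical listPage function, not the real Box client):

package main

import "fmt"

// page is a hypothetical single page of results plus the marker for the next one.
type page struct {
	Entries    []string
	NextMarker *string // nil when there are no more pages
}

func listAllItems(listPage func(marker *string) (page, error)) ([]string, error) {
	var all []string
	var marker *string
	for {
		p, err := listPage(marker)
		if err != nil {
			return nil, fmt.Errorf("couldn't list files: %w", err)
		}
		all = append(all, p.Entries...)
		if p.NextMarker == nil {
			break
		}
		marker = p.NextMarker
	}
	return all, nil
}

func main() {
	next := "m1"
	pages := map[string]page{
		"":   {Entries: []string{"a", "b"}, NextMarker: &next},
		"m1": {Entries: []string{"c"}},
	}
	items, _ := listAllItems(func(marker *string) (page, error) {
		key := ""
		if marker != nil {
			key = *marker
		}
		return pages[key], nil
	})
	fmt.Println(items) // [a b c]
}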


@@ -14,24 +14,19 @@ import (
"crypto/rsa"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/jwtutil"
"github.com/youmark/pkcs8"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -42,9 +37,13 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/jwtutil"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
)
@@ -57,7 +56,6 @@ const (
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api.box.com/2.0"
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024
tokenURL = "https://api.box.com/oauth2/token"
@@ -84,7 +82,7 @@ func init() {
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
@@ -93,15 +91,15 @@ func init() {
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
}
return nil, nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",
@@ -110,23 +108,23 @@ func init() {
Advanced: true,
}, {
Name: "box_config_file",
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\nLeave blank normally.",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
Examples: []fs.OptionExample{{
Value: "user",
Help: "Rclone should act on behalf of a user",
Help: "Rclone should act on behalf of a user.",
}, {
Value: "enterprise",
Help: "Rclone should act on behalf of a service account",
Help: "Rclone should act on behalf of a service account.",
}},
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50MB).",
Help: "Cutoff for switching to multipart upload (>= 50 MiB).",
Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true,
}, {
@@ -134,6 +132,16 @@ func init() {
Help: "Max number of times to try committing a multipart file.",
Default: 100,
Advanced: true,
}, {
Name: "list_chunk",
Default: 1000,
Help: "Size of listing chunk 1-1000.",
Advanced: true,
}, {
Name: "owned_by",
Default: "",
Help: "Only show items owned by the login (email address) passed in.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -157,15 +165,15 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return fmt.Errorf("get box config: %w", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return fmt.Errorf("get decrypted private key: %w", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return fmt.Errorf("get claims: %w", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
@@ -177,11 +185,11 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, errors.Wrap(err, "box: failed to read Box config")
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}
err = json.Unmarshal(file, &boxConfig)
if err != nil {
return nil, errors.Wrap(err, "box: failed to parse Box config")
return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
}
return boxConfig, nil
}
@@ -189,7 +197,7 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
return nil, errors.Wrap(err, "box: failed to generate random string for jti")
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
}
claims = &jws.ClaimSet{
@@ -230,12 +238,12 @@ func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if len(rest) > 0 {
return nil, errors.Wrap(err, "box: extra data included in private key")
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
}
rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
if err != nil {
return nil, errors.Wrap(err, "box: failed to decrypt private key")
return nil, fmt.Errorf("box: failed to decrypt private key: %w", err)
}
return rsaKey.(*rsa.PrivateKey), nil
@@ -248,6 +256,8 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"`
}
// Fs represents a remote box
@@ -327,6 +337,13 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}
// Box API errors which should be retried
if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "operation_blocked_temporary" {
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -341,7 +358,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
if strings.EqualFold(item.Name, leaf) {
info = item
return true
@@ -384,7 +401,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
if opt.UploadCutoff < minUploadCutoff {
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
return nil, fmt.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
}
root = parsePath(root)
@@ -395,7 +412,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.AccessToken == "" {
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
return nil, fmt.Errorf("failed to configure Box: %w", err)
}
}
@@ -516,7 +533,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
found, err = f.listAll(ctx, pathID, true, false, true, func(item *api.Item) bool {
if strings.EqualFold(item.Name, leaf) {
pathIDOut = item.ID
return true
@@ -572,17 +589,20 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, activeOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/" + dirID + "/items",
Parameters: fieldsValue(),
}
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
opts.Parameters.Set("limit", strconv.Itoa(f.opt.ListChunk))
opts.Parameters.Set("usemarker", "true")
var marker *string
OUTER:
for {
opts.Parameters.Set("offset", strconv.Itoa(offset))
if marker != nil {
opts.Parameters.Set("marker", *marker)
}
var result api.FolderItems
var resp *http.Response
@@ -591,7 +611,7 @@ OUTER:
return shouldRetry(ctx, resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
return found, fmt.Errorf("couldn't list files: %w", err)
}
for i := range result.Entries {
item := &result.Entries[i]
@@ -607,7 +627,10 @@ OUTER:
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
if item.ItemStatus != api.ItemStatusActive {
if activeOnly && item.ItemStatus != api.ItemStatusActive {
continue
}
if f.opt.OwnedBy != "" && f.opt.OwnedBy != item.OwnedBy.Login {
continue
}
item.Name = f.opt.Enc.ToStandardName(item.Name)
@@ -616,8 +639,8 @@ OUTER:
break OUTER
}
}
offset += result.Limit
if offset >= result.TotalCount {
marker = result.NextMarker
if marker == nil {
break
}
}
@@ -639,7 +662,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.Type == api.ItemTypeFolder {
// cache the directory ID for later lookups
@@ -715,14 +738,14 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
var conflict api.PreUploadCheckConflict
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
if err != nil {
return "", errors.Wrap(err, "pre-upload check: JSON decode failed")
return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
}
if conflict.Conflicts.Type != api.ItemTypeFile {
return "", errors.Wrap(err, "pre-upload check: can't overwrite non file with file")
return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
}
return conflict.Conflicts.ID, nil
}
return "", errors.Wrap(err, "pre-upload check")
return "", fmt.Errorf("pre-upload check: %w", err)
}
return "", nil
}
@@ -831,7 +854,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
return fmt.Errorf("rmdir failed: %w", err)
}
f.dirCache.FlushDir(dir)
if err != nil {
@@ -875,7 +898,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
// Create temporary object
@@ -959,7 +982,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to read user info")
return nil, fmt.Errorf("failed to read user info: %w", err)
}
// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
@@ -1093,45 +1116,36 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/trash/items",
Parameters: url.Values{
"fields": []string{"type", "id"},
},
}
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
for {
opts.Parameters.Set("offset", strconv.Itoa(offset))
var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list trash")
}
for i := range result.Entries {
item := &result.Entries[i]
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
var (
deleteErrors = int64(0)
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
wg sync.WaitGroup
)
_, err = f.listAll(ctx, "trash", false, false, false, func(item *api.Item) bool {
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
wg.Add(1)
concurrencyControl <- struct{}{}
go func() {
defer func() {
<-concurrencyControl
wg.Done()
}()
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
return errors.Wrap(err, "failed to delete file")
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
atomic.AddInt64(&deleteErrors, 1)
}
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
}
offset += result.Limit
if offset >= result.TotalCount {
break
}()
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
}
return false
})
wg.Wait()
if deleteErrors != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors)
}
return
return err
}
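The rewritten CleanUp above deletes trash items in parallel but bounds the fan-out with a buffered channel used as a semaphore (sized from fs.GetConfig(ctx).Checkers), a WaitGroup, and an atomic failure counter. A minimal standalone sketch of that bounded-worker pattern (hypothetical deleteItem, standard library only):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// deleteItem is a stand-in for the real per-item delete call.
func deleteItem(name string) error { return nil }

func main() {
	items := []string{"a", "b", "c", "d", "e"}
	var (
		failures int64
		sem      = make(chan struct{}, 2) // at most 2 deletions in flight
		wg       sync.WaitGroup
	)
	for _, item := range items {
		item := item // capture for the goroutine
		wg.Add(1)
		sem <- struct{}{} // acquire a slot
		go func() {
			defer func() {
				<-sem // release the slot
				wg.Done()
			}()
			if err := deleteItem(item); err != nil {
				atomic.AddInt64(&failures, 1)
			}
		}()
	}
	wg.Wait()
	if failures != 0 {
		fmt.Printf("failed to delete %d items\n", failures)
	}
}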
// DirCacheFlush resets the directory cache - used in testing as an
@@ -1185,8 +1199,11 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type == api.ItemTypeFolder {
return fs.ErrorIsDir
}
if info.Type != api.ItemTypeFile {
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
}
o.hasMetaData = true
o.size = int64(info.Size)
@@ -1286,7 +1303,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
// This is recommended for less than 50 MiB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
upload := api.UploadFile{
Name: o.fs.opt.Enc.FromStandardName(leaf),
@@ -1322,7 +1339,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
return err
}
if result.TotalCount != 1 || len(result.Entries) != 1 {
return errors.Errorf("failed to upload %v - not sure why", o)
return fmt.Errorf("failed to upload %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}


@@ -8,6 +8,7 @@ import (
"crypto/sha1"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@@ -15,7 +16,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -140,7 +140,7 @@ outer:
}
}
default:
return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
return nil, fmt.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
}
}
fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
@@ -151,7 +151,7 @@ outer:
}
err = json.Unmarshal(body, &result)
if err != nil {
return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
return nil, fmt.Errorf("couldn't decode commit response: %q: %w", body, err)
}
return result, nil
}
@@ -177,7 +177,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
if err != nil {
return errors.Wrap(err, "multipart upload create session failed")
return fmt.Errorf("multipart upload create session failed: %w", err)
}
chunkSize := session.PartSize
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
@@ -222,7 +222,7 @@ outer:
// Read the chunk
_, err = io.ReadFull(in, buf)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to read source")
err = fmt.Errorf("multipart upload failed to read source: %w", err)
break outer
}
@@ -238,7 +238,7 @@ outer:
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
err = fmt.Errorf("multipart upload failed to upload part: %w", err)
select {
case errs <- err:
default:
@@ -266,11 +266,11 @@ outer:
// Finalise the upload session
result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalize")
return fmt.Errorf("multipart upload failed to finalize: %w", err)
}
if result.TotalCount != 1 || len(result.Entries) != 1 {
return errors.Errorf("multipart upload failed %v - not sure why", o)
return fmt.Errorf("multipart upload failed %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}

backend/cache/cache.go

@@ -1,9 +1,11 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
import (
"context"
"errors"
"fmt"
"io"
"math"
@@ -18,7 +20,6 @@ import (
"syscall"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
@@ -68,26 +69,26 @@ func init() {
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "plex_url",
Help: "The URL of the Plex server",
Help: "The URL of the Plex server.",
}, {
Name: "plex_username",
Help: "The username of the Plex user",
Help: "The username of the Plex user.",
}, {
Name: "plex_password",
Help: "The password of the Plex user",
Help: "The password of the Plex user.",
IsPassword: true,
}, {
Name: "plex_token",
Help: "The plex token for authentication - auto set normally",
Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth,
Advanced: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server",
Help: "Skip all certificate verification when connecting to the Plex server.",
Advanced: true,
}, {
Name: "chunk_size",
@@ -98,14 +99,14 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
will need to be cleared or unexpected EOF errors will occur.`,
Default: DefCacheChunkSize,
Examples: []fs.OptionExample{{
Value: "1m",
Help: "1MB",
Value: "1M",
Help: "1 MiB",
}, {
Value: "5M",
Help: "5 MB",
Help: "5 MiB",
}, {
Value: "10M",
Help: "10 MB",
Help: "10 MiB",
}},
}, {
Name: "info_age",
@@ -132,22 +133,22 @@ oldest chunks until it goes under this value.`,
Default: DefCacheTotalChunkSize,
Examples: []fs.OptionExample{{
Value: "500M",
Help: "500 MB",
Help: "500 MiB",
}, {
Value: "1G",
Help: "1 GB",
Help: "1 GiB",
}, {
Value: "10G",
Help: "10 GB",
Help: "10 GiB",
}},
}, {
Name: "db_path",
Default: filepath.Join(config.CacheDir, "cache-backend"),
Help: "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
Help: "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
Advanced: true,
}, {
Name: "chunk_path",
Default: filepath.Join(config.CacheDir, "cache-backend"),
Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
Help: `Directory to cache chunk files.
Path to where partial file data (chunks) are stored locally. The remote
@@ -167,6 +168,7 @@ then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
Name: "chunk_clean_interval",
Default: DefCacheChunkCleanInterval,
Help: `How often should the cache perform cleanups of the chunk storage.
The default value should be ok for most people. If you find that the
cache goes over "cache-chunk-total-size" too often then try to lower
this value to force it to perform cleanups more often.`,
@@ -220,7 +222,7 @@ available on the local machine.`,
}, {
Name: "rps",
Default: int(DefCacheRps),
Help: `Limits the number of requests per second to the source FS (-1 to disable)
Help: `Limits the number of requests per second to the source FS (-1 to disable).
This setting places a hard limit on the number of requests per second
that cache will be doing to the cloud provider remote and try to
@@ -241,7 +243,7 @@ still pass.`,
}, {
Name: "writes",
Default: DefCacheWrites,
Help: `Cache file data on writes through the FS
Help: `Cache file data on writes through the FS.
If you need to read files immediately after you upload them through
cache you can enable this flag to have their data stored in the
@@ -262,7 +264,7 @@ provider`,
}, {
Name: "tmp_wait_time",
Default: DefCacheTmpWaitTime,
Help: `How long should files be stored in local cache before being uploaded
Help: `How long should files be stored in local cache before being uploaded.
This is the duration that a file must wait in the temporary location
_cache-tmp-upload-path_ before it is selected for upload.
@@ -273,7 +275,7 @@ to start the upload if a queue formed for this purpose.`,
}, {
Name: "db_wait_time",
Default: DefCacheDbWaitTime,
Help: `How long to wait for the DB to be available - 0 is unlimited
Help: `How long to wait for the DB to be available - 0 is unlimited.
Only one process can have the DB open at any one time, so rclone waits
for this duration for the DB to become available before it gives an
@@ -339,8 +341,14 @@ func parseRootPath(path string) (string, error) {
return strings.Trim(path, "/"), nil
}
var warnDeprecated sync.Once
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
warnDeprecated.Do(func() {
fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
})
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -348,7 +356,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
return nil, err
}
if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
}
@@ -358,13 +366,13 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
rpath, err := parseRootPath(rootPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
}
remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
wrappedFs, wrapErr := cache.Get(ctx, remotePath)
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
}
var fsErr error
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -393,7 +401,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if opt.PlexToken != "" {
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
if err != nil {
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
} else {
if opt.PlexPassword != "" && opt.PlexUsername != "" {
@@ -405,7 +413,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
m.Set("plex_token", token)
})
if err != nil {
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
}
}
@@ -414,8 +422,8 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
dbPath := f.opt.DbPath
chunkPath := f.opt.ChunkPath
// if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
if dbPath != filepath.Join(config.GetCacheDir(), "cache-backend") &&
chunkPath == filepath.Join(config.GetCacheDir(), "cache-backend") {
chunkPath = dbPath
}
if filepath.Ext(dbPath) != "" {
@@ -426,11 +434,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
}
err = os.MkdirAll(dbPath, os.ModePerm)
if err != nil {
return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
}
err = os.MkdirAll(chunkPath, os.ModePerm)
if err != nil {
return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
}
dbPath = filepath.Join(dbPath, name+".db")
@@ -442,7 +450,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
DbWaitTime: time.Duration(opt.DbWaitTime),
})
if err != nil {
return nil, errors.Wrapf(err, "failed to start cache db")
return nil, fmt.Errorf("failed to start cache db: %w", err)
}
// Trap SIGINT and SIGTERM to close the DB handle gracefully
c := make(chan os.Signal, 1)
@@ -476,12 +484,12 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if f.opt.TempWritePath != "" {
err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
if err != nil {
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
}
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
if err != nil {
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
return nil, fmt.Errorf("failed to create temp fs: %w", err)
}
fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
@@ -598,7 +606,7 @@ func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err er
out = make(rc.Params)
m, err := f.Stats()
if err != nil {
return out, errors.Errorf("error while getting cache stats")
return out, fmt.Errorf("error while getting cache stats")
}
out["status"] = "ok"
out["stats"] = m
@@ -625,7 +633,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
out = make(rc.Params)
remoteInt, ok := in["remote"]
if !ok {
return out, errors.Errorf("remote is needed")
return out, fmt.Errorf("remote is needed")
}
remote := remoteInt.(string)
withData := false
@@ -636,7 +644,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
remote = f.unwrapRemote(remote)
if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
return out, errors.Errorf("%s doesn't exist in cache", remote)
return out, fmt.Errorf("%s doesn't exist in cache", remote)
}
co := NewObject(f, remote)
@@ -645,7 +653,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
cd := NewDirectory(f, remote)
err := f.cache.ExpireDir(cd)
if err != nil {
return out, errors.WithMessage(err, "error expiring directory")
return out, fmt.Errorf("error expiring directory: %w", err)
}
// notify vfs too
f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
@@ -656,7 +664,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
// expire the entry
err = f.cache.ExpireObject(co, withData)
if err != nil {
return out, errors.WithMessage(err, "error expiring file")
return out, fmt.Errorf("error expiring file: %w", err)
}
// notify vfs too
f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
@@ -677,24 +685,24 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
case 1:
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
end = start + 1
case 2:
if ints[0] != "" {
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
}
if ints[1] != "" {
end, err = strconv.ParseInt(ints[1], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
}
default:
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
crs = append(crs, chunkRange{start: start, end: end})
}
@@ -749,18 +757,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
delete(in, "chunks")
crs, err := parseChunks(s)
if err != nil {
return nil, errors.Wrap(err, "invalid chunks parameter")
return nil, fmt.Errorf("invalid chunks parameter: %w", err)
}
var files [][2]string
for k, v := range in {
if !strings.HasPrefix(k, "file") {
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
}
switch v := v.(type) {
case string:
files = append(files, [2]string{v, f.unwrapRemote(v)})
default:
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
}
}
type fileStatus struct {
@@ -1116,7 +1124,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
case fs.Directory:
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
default:
return errors.Errorf("Unknown object type %T", entry)
return fmt.Errorf("Unknown object type %T", entry)
}
}


@@ -1,5 +1,5 @@
// +build !plan9,!js
// +build !race
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test
@@ -7,6 +7,7 @@ import (
"bytes"
"context"
"encoding/base64"
"errors"
goflag "flag"
"fmt"
"io"
@@ -16,12 +17,12 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"strings"
"testing"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
@@ -293,6 +294,9 @@ func TestInternalCachedWrittenContentMatches(t *testing.T) {
}
func TestInternalDoubleWrittenContentMatches(t *testing.T) {
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -442,7 +446,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
return err
}
if coSize != expectedSize {
return errors.Errorf("%v <> %v", coSize, expectedSize)
return fmt.Errorf("%v <> %v", coSize, expectedSize)
}
return nil
}, 12, time.Second*10)
@@ -498,7 +502,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 2 {
log.Printf("not expected listing /test: %v", li)
return errors.Errorf("not expected listing /test: %v", li)
return fmt.Errorf("not expected listing /test: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/one")
@@ -508,7 +512,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 0 {
log.Printf("not expected listing /test/one: %v", li)
return errors.Errorf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/second")
@@ -518,21 +522,21 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 1 {
log.Printf("not expected listing /test/second: %v", li)
return errors.Errorf("not expected listing /test/second: %v", li)
return fmt.Errorf("not expected listing /test/second: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "data.bin" {
log.Printf("not expected name: %v", fi.Name())
return errors.Errorf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/second/data.bin" {
log.Printf("not expected remote: %v", di.Remote())
return errors.Errorf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
return errors.Errorf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
log.Printf("complete listing: %v", li)
@@ -587,17 +591,17 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
if !found {
log.Printf("not found /test")
return errors.Errorf("not found /test")
return fmt.Errorf("not found /test")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
if !found {
log.Printf("not found /test/one")
return errors.Errorf("not found /test/one")
return fmt.Errorf("not found /test/one")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
if !found {
log.Printf("not found /test/one/test2")
return errors.Errorf("not found /test/one/test2")
return fmt.Errorf("not found /test/one/test2")
}
li, err := runInstance.list(t, rootFs, "test/one")
if err != nil {
@@ -606,21 +610,21 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
}
if len(li) != 1 {
log.Printf("not expected listing /test/one: %v", li)
return errors.Errorf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "test2" {
log.Printf("not expected name: %v", fi.Name())
return errors.Errorf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/one/test2" {
log.Printf("not expected remote: %v", di.Remote())
return errors.Errorf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
return errors.Errorf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
log.Printf("complete listing /test/one/test2")
return nil
@@ -681,6 +685,9 @@ func TestInternalCacheWrites(t *testing.T) {
}
func TestInternalMaxChunkSizeRespected(t *testing.T) {
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -836,7 +843,7 @@ func newRun() *run {
if uploadDir == "" {
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
if err != nil {
log.Fatalf("Failed to create temp dir: %v", err)
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
}
} else {
r.tmpUploadDir = uploadDir
@@ -919,9 +926,9 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
}
runInstance.rootIsCrypt = rootIsCrypt
runInstance.dbPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
runInstance.chunkPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
runInstance.vfsCachePath = filepath.Join(config.CacheDir, "vfs", remote)
runInstance.dbPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote+".db")
runInstance.chunkPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote)
runInstance.vfsCachePath = filepath.Join(config.GetCacheDir(), "vfs", remote)
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
@@ -1055,7 +1062,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
if !noLengthCheck && size != int64(len(checkSample)) {
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
return checkSample, fmt.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
}
return checkSample, nil
}
@@ -1250,7 +1257,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
case state = <-buCh:
// continue
case <-time.After(maxDuration):
waitCh <- errors.Errorf("Timed out waiting for background upload: %v", remote)
waitCh <- fmt.Errorf("Timed out waiting for background upload: %v", remote)
return
}
checkRemote := state.Remote
@@ -1267,7 +1274,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
return
}
}
waitCh <- errors.Errorf("Too many attempts to wait for the background upload: %v", remote)
waitCh <- fmt.Errorf("Too many attempts to wait for the background upload: %v", remote)
}()
return waitCh
}

View File

@@ -1,7 +1,7 @@
// Test Cache filesystem interface
// +build !plan9,!js
// +build !race
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test

View File

@@ -1,6 +1,7 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
package cache

View File

@@ -1,5 +1,5 @@
// +build !plan9,!js
// +build !race
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test

View File

@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

View File

@@ -1,9 +1,11 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
import (
"context"
"errors"
"fmt"
"io"
"path"
@@ -12,7 +14,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
)
@@ -242,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
return nil, errors.Errorf("chunk not found %v", chunkStart)
return nil, fmt.Errorf("chunk not found %v", chunkStart)
}
// first chunk will be aligned with the start
@@ -322,7 +323,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
r.offset = r.cachedObject.Size() + offset
default:
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
err = fmt.Errorf("cache: unimplemented seek whence %v", whence)
}
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))

View File

@@ -1,15 +1,16 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
import (
"context"
"fmt"
"io"
"path"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
@@ -177,10 +178,14 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
}
if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
if err != nil {
err = fmt.Errorf("in parent fs %v: %w", o.ParentFs, err)
}
} else {
liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
if err != nil {
err = fmt.Errorf("in cache fs %v: %w", o.CacheFs.Fs, err)
}
}
if err != nil {
fs.Errorf(o, "error refreshing object in : %v", err)
@@ -252,7 +257,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return errors.Errorf("%v is currently uploading, can't update", o)
return fmt.Errorf("%v is currently uploading, can't update", o)
}
}
fs.Debugf(o, "updating object contents with size %v", src.Size())
@@ -291,7 +296,7 @@ func (o *Object) Remove(ctx context.Context) error {
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return errors.Errorf("%v is currently uploading, can't delete", o)
return fmt.Errorf("%v is currently uploading, can't delete", o)
}
}
err := o.Object.Remove(ctx)
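
Aside: the recurring edit in the hunks above replaces github.com/pkg/errors with the standard library. Where the old code wrapped a cause with errors.Wrap/errors.Wrapf, the new code uses fmt.Errorf with the %w verb so the cause stays inspectable via errors.Is/errors.As; where the old code only formatted a message with errors.Errorf, plain fmt.Errorf (with %v) is enough. A minimal, self-contained sketch of the pattern follows; the function and sentinel names are illustrative, not rclone's API.

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for any sentinel error a caller may want to test for.
var errNotFound = errors.New("not found")

// openBucket is a hypothetical helper showing the wrapping style used above.
func openBucket(name string) error {
	// %w wraps the cause into the chain; %v would only render it as text.
	return fmt.Errorf("couldn't open bucket (%v): %w", name, errNotFound)
}

func main() {
	err := openBucket("test")
	fmt.Println(err)                         // couldn't open bucket (test): not found
	fmt.Println(errors.Is(err, errNotFound)) // true, because %w preserves the chain
}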

View File

@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

View File

@@ -1,14 +1,15 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
import (
"fmt"
"strconv"
"strings"
"time"
cache "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -52,7 +53,7 @@ func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
return data, nil
}
return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
return nil, fmt.Errorf("couldn't get cached object data at offset %v", offset)
}
// AddChunk adds a new chunk of a cached object

View File

@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
@@ -16,7 +17,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
@@ -119,11 +119,11 @@ func (b *Persistent) connect() error {
err = os.MkdirAll(b.dataPath, os.ModePerm)
if err != nil {
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err)
}
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
if err != nil {
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err)
}
if b.features.PurgeDb {
b.Purge()
@@ -175,7 +175,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(remote, false, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", remote)
return fmt.Errorf("couldn't open bucket (%v)", remote)
}
data := bucket.Get([]byte("."))
@@ -183,7 +183,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
return json.Unmarshal(data, cd)
}
return errors.Errorf("%v not found", remote)
return fmt.Errorf("%v not found", remote)
})
return cd, err
@@ -208,7 +208,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
}
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
}
for _, cachedDir := range cachedDirs {
@@ -225,7 +225,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
encoded, err := json.Marshal(cachedDir)
if err != nil {
return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
}
err = b.Put([]byte("."), encoded)
if err != nil {
@@ -243,17 +243,17 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedDir.abs(), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs())
}
val := bucket.Get([]byte("."))
if val != nil {
err := json.Unmarshal(val, cachedDir)
if err != nil {
return errors.Errorf("error during unmarshalling obj: %v", err)
return fmt.Errorf("error during unmarshalling obj: %v", err)
}
} else {
return errors.Errorf("missing cached dir: %v", cachedDir)
return fmt.Errorf("missing cached dir: %v", cachedDir)
}
c := bucket.Cursor()
@@ -268,7 +268,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
// we try to find a cached meta for the dir
currentBucket := c.Bucket().Bucket(k)
if currentBucket == nil {
return errors.Errorf("couldn't open bucket (%v)", string(k))
return fmt.Errorf("couldn't open bucket (%v)", string(k))
}
metaKey := currentBucket.Get([]byte("."))
@@ -317,7 +317,7 @@ func (b *Persistent) RemoveDir(fp string) error {
err = b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", fp)
return fmt.Errorf("couldn't open bucket (%v)", fp)
}
// delete the cached dir
err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
@@ -377,13 +377,13 @@ func (b *Persistent) GetObject(cachedObject *Object) (err error) {
return b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
}
val := bucket.Get([]byte(cachedObject.Name))
if val != nil {
return json.Unmarshal(val, cachedObject)
}
return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
return fmt.Errorf("couldn't find object (%v)", cachedObject.Name)
})
}
@@ -392,16 +392,16 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, true, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject)
}
// cache Object Info
encoded, err := json.Marshal(cachedObject)
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
}
err = bucket.Put([]byte(cachedObject.Name), encoded)
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
}
return nil
})
@@ -413,7 +413,7 @@ func (b *Persistent) RemoveObject(fp string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
}
err := bucket.Delete([]byte(cleanPath(objName)))
if err != nil {
@@ -445,7 +445,7 @@ func (b *Persistent) HasEntry(remote string) bool {
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(dir, false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", remote)
return fmt.Errorf("couldn't open parent bucket for %v", remote)
}
if f := bucket.Bucket([]byte(name)); f != nil {
return nil
@@ -454,7 +454,7 @@ func (b *Persistent) HasEntry(remote string) bool {
return nil
}
return errors.Errorf("couldn't find object (%v)", remote)
return fmt.Errorf("couldn't find object (%v)", remote)
})
if err == nil {
return true
@@ -554,7 +554,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
err := b.db.Update(func(tx *bolt.Tx) error {
dataTsBucket := tx.Bucket([]byte(DataTsBucket))
if dataTsBucket == nil {
return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
return fmt.Errorf("Couldn't open (%v) bucket", DataTsBucket)
}
// iterate through ts
c := dataTsBucket.Cursor()
@@ -732,7 +732,7 @@ func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
return nil
}
}
return errors.Errorf("not found %v-%v", path, offset)
return fmt.Errorf("not found %v-%v", path, offset)
})
return t, err
@@ -772,7 +772,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
tempObj := &tempUploadInfo{
DestPath: destPath,
@@ -783,11 +783,11 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
// cache Object Info
encoded, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
}
err = bucket.Put([]byte(destPath), encoded)
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
return nil
@@ -802,7 +802,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
err = b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
c := bucket.Cursor()
@@ -835,7 +835,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
return nil
}
return errors.Errorf("no pending upload found")
return fmt.Errorf("no pending upload found")
})
return destPath, err
@@ -846,14 +846,14 @@ func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(tempBucket))
if bucket == nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return errors.Errorf("pending upload (%v) not found %v", remote, err)
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
started = tempObj.Started
@@ -868,7 +868,7 @@ func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, e
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(tempBucket))
if bucket == nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
c := bucket.Cursor()
@@ -898,22 +898,22 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return errors.Errorf("pending upload (%v) not found %v", remote, err)
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
tempObj.Started = false
v2, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated %v", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated %v", err)
}
return nil
})
@@ -926,7 +926,7 @@ func (b *Persistent) removePendingUpload(remote string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
return bucket.Delete([]byte(remote))
})
@@ -941,17 +941,17 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return errors.Errorf("pending upload (%v) not found %v", remote, err)
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
if tempObj.Started {
return errors.Errorf("pending upload already started %v", remote)
return fmt.Errorf("pending upload already started %v", remote)
}
err = fn(tempObj)
if err != nil {
@@ -969,11 +969,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
}
v2, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated %v", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated %v", err)
}
return nil
@@ -1014,11 +1014,11 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
// cache Object Info
encoded, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
}
err = bucket.Put([]byte(destPath), encoded)
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
}
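
Aside: the persistent-store hunks above all share the same bbolt transaction shape: reads run inside db.View, mutations inside db.Update, and returning a non-nil error from the closure aborts (and, for Update, rolls back) the transaction. A minimal sketch of that shape, assuming go.etcd.io/bbolt as imported above; the file and bucket names are illustrative only.

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Illustrative path; the cache backend derives its own under the cache dir.
	db, err := bolt.Open("example.db", 0644, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Mutations go through Update; a returned error rolls the transaction back.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("temp-uploads"))
		if err != nil {
			return fmt.Errorf("couldn't create bucket: %w", err)
		}
		return b.Put([]byte("remote/path"), []byte(`{"started":false}`))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Reads go through View and see a consistent snapshot.
	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("temp-uploads"))
		if b == nil {
			return fmt.Errorf("bucket missing")
		}
		fmt.Printf("%s\n", b.Get([]byte("remote/path")))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}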

View File

@@ -8,6 +8,7 @@ import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
gohash "hash"
"io"
@@ -21,7 +22,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -150,12 +150,13 @@ func init() {
Name: "remote",
Required: true,
Help: `Remote to chunk/unchunk.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or maybe "myremote:" (not recommended).`,
}, {
Name: "chunk_size",
Advanced: false,
Default: fs.SizeSuffix(2147483648), // 2GB
Default: fs.SizeSuffix(2147483648), // 2 GiB
Help: `Files larger than chunk size will be split in chunks.`,
}, {
Name: "name_format",
@@ -163,6 +164,7 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
Hide: fs.OptionHideCommandLine,
Default: `*.rclone_chunk.###`,
Help: `String format of chunk file names.
The two placeholders are: base file name (*) and chunk number (#...).
There must be one and only one asterisk and one or more consecutive hash characters.
If chunk number has less digits than the number of hashes, it is left-padded by zeros.
@@ -174,48 +176,57 @@ Possible chunk files are ignored if their name does not match given format.`,
Hide: fs.OptionHideCommandLine,
Default: 1,
Help: `Minimum valid chunk number. Usually 0 or 1.
By default chunk numbers start from 1.`,
}, {
Name: "meta_format",
Advanced: true,
Hide: fs.OptionHideCommandLine,
Default: "simplejson",
Help: `Format of the metadata object or "none". By default "simplejson".
Help: `Format of the metadata object or "none".
By default "simplejson".
Metadata is a small JSON file named after the composite file.`,
Examples: []fs.OptionExample{{
Value: "none",
Help: `Do not use metadata files at all. Requires hash type "none".`,
Help: `Do not use metadata files at all.
Requires hash type "none".`,
}, {
Value: "simplejson",
Help: `Simple JSON supports hash sums and chunk validation.
It has the following fields: ver, size, nchunks, md5, sha1.`,
}},
}, {
Name: "hash_type",
Advanced: false,
Default: "md5",
Help: `Choose how chunker handles hash sums. All modes but "none" require metadata.`,
Help: `Choose how chunker handles hash sums.
All modes but "none" require metadata.`,
Examples: []fs.OptionExample{{
Value: "none",
Help: `Pass any hash supported by wrapped remote for non-chunked files, return nothing otherwise`,
Help: `Pass any hash supported by wrapped remote for non-chunked files.
Return nothing otherwise.`,
}, {
Value: "md5",
Help: `MD5 for composite files`,
Help: `MD5 for composite files.`,
}, {
Value: "sha1",
Help: `SHA1 for composite files`,
Help: `SHA1 for composite files.`,
}, {
Value: "md5all",
Help: `MD5 for all files`,
Help: `MD5 for all files.`,
}, {
Value: "sha1all",
Help: `SHA1 for all files`,
Help: `SHA1 for all files.`,
}, {
Value: "md5quick",
Help: `Copying a file to chunker will request MD5 from the source falling back to SHA1 if unsupported`,
Help: `Copying a file to chunker will request MD5 from the source.
Falling back to SHA1 if unsupported.`,
}, {
Value: "sha1quick",
Help: `Similar to "md5quick" but prefers SHA1 over MD5`,
Help: `Similar to "md5quick" but prefers SHA1 over MD5.`,
}},
}, {
Name: "fail_hard",
@@ -279,13 +290,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
baseName, basePath, err := fspath.SplitFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
}
// Look for a file first
remotePath := fspath.JoinRootPath(basePath, rpath)
baseFs, err := cache.Get(ctx, baseName+remotePath)
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", baseName+remotePath, err)
}
if !operations.CanServerSideMove(baseFs) {
return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy")
@@ -375,7 +386,7 @@ type Fs struct {
// configure must be called only from NewFs or by unit tests.
func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error {
if err := f.setChunkNameFormat(nameFormat); err != nil {
return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
return fmt.Errorf("invalid name format '%s': %w", nameFormat, err)
}
if err := f.setMetaFormat(metaFormat); err != nil {
return err
@@ -432,10 +443,10 @@ func (f *Fs) setHashType(hashType string) error {
f.hashFallback = true
case "md5all":
f.useMD5 = true
f.hashAll = !f.base.Hashes().Contains(hash.MD5)
f.hashAll = !f.base.Hashes().Contains(hash.MD5) || f.base.Features().SlowHash
case "sha1all":
f.useSHA1 = true
f.hashAll = !f.base.Hashes().Contains(hash.SHA1)
f.hashAll = !f.base.Hashes().Contains(hash.SHA1) || f.base.Features().SlowHash
default:
return fmt.Errorf("unsupported hash type '%s'", hashType)
}
@@ -812,7 +823,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
tempEntries = append(tempEntries, wrapDir)
default:
if f.opt.FailHard {
return nil, fmt.Errorf("Unknown object type %T", entry)
return nil, fmt.Errorf("unknown object type %T", entry)
}
fs.Debugf(f, "unknown object type %T", entry)
}
@@ -867,7 +878,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// ignores non-chunked objects and skips chunk size checks.
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
if err := f.forbidChunk(false, remote); err != nil {
return nil, errors.Wrap(err, "can't access")
return nil, fmt.Errorf("can't access: %w", err)
}
var (
@@ -916,7 +927,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
case fs.ErrorDirNotFound:
entries = nil
default:
return nil, errors.Wrap(err, "can't detect composite file")
return nil, fmt.Errorf("can't detect composite file: %w", err)
}
if f.useNoRename {
@@ -1056,7 +1067,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
case ErrMetaTooBig, ErrMetaUnknown:
return err // return these errors unwrapped for unit tests
default:
return errors.Wrap(err, "invalid metadata")
return fmt.Errorf("invalid metadata: %w", err)
}
if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
return errors.New("metadata doesn't match file size")
@@ -1099,7 +1110,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
switch o.f.opt.MetaFormat {
case "simplejson":
if data != nil && len(data) > maxMetadataSizeWritten {
if len(data) > maxMetadataSizeWritten {
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
}
var metadata metaSimpleJSON
@@ -1121,7 +1132,7 @@ func (f *Fs) put(
// Perform consistency checks
if err := f.forbidChunk(src, remote); err != nil {
return nil, errors.Wrap(err, action+" refused")
return nil, fmt.Errorf("%s refused: %w", action, err)
}
if target == nil {
// Get target object with a quick directory scan
@@ -1135,7 +1146,7 @@ func (f *Fs) put(
obj := target.(*Object)
if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
// refuse to update a file of unsupported format
return nil, errors.Wrap(err, "refusing to "+action)
return nil, fmt.Errorf("refusing to %s: %w", action, err)
}
}
@@ -1214,7 +1225,7 @@ func (f *Fs) put(
// and skips the "EOF" read. Hence, switch to next limit here.
if !(c.chunkLimit == 0 || c.chunkLimit == c.chunkSize || c.sizeTotal == -1 || c.done) {
silentlyRemove(ctx, chunk)
return nil, fmt.Errorf("Destination ignored %d data bytes", c.chunkLimit)
return nil, fmt.Errorf("destination ignored %d data bytes", c.chunkLimit)
}
c.chunkLimit = c.chunkSize
@@ -1223,7 +1234,7 @@ func (f *Fs) put(
// Validate uploaded size
if c.sizeTotal != -1 && c.readCount != c.sizeTotal {
return nil, fmt.Errorf("Incorrect upload size %d != %d", c.readCount, c.sizeTotal)
return nil, fmt.Errorf("incorrect upload size %d != %d", c.readCount, c.sizeTotal)
}
// Check for input that looks like valid metadata
@@ -1260,7 +1271,7 @@ func (f *Fs) put(
sizeTotal += chunk.Size()
}
if sizeTotal != c.readCount {
return nil, fmt.Errorf("Incorrect chunks size %d != %d", sizeTotal, c.readCount)
return nil, fmt.Errorf("incorrect chunks size %d != %d", sizeTotal, c.readCount)
}
// If previous object was chunked, remove its chunks
@@ -1448,7 +1459,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
c.accountBytes(size)
return nil
}
const bufLen = 1048576 // 1MB
const bufLen = 1048576 // 1 MiB
buf := make([]byte, bufLen)
for size > 0 {
n := size
@@ -1553,7 +1564,7 @@ func (f *Fs) Hashes() hash.Set {
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
if err := f.forbidChunk(dir, dir); err != nil {
return errors.Wrap(err, "can't mkdir")
return fmt.Errorf("can't mkdir: %w", err)
}
return f.base.Mkdir(ctx, dir)
}
@@ -1622,7 +1633,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
// operations.Move can still call Remove if chunker's Move refuses
// to corrupt file in hard mode. Hence, refuse to Remove, too.
return errors.Wrap(err, "refuse to corrupt")
return fmt.Errorf("refuse to corrupt: %w", err)
}
if err := o.readMetadata(ctx); err == ErrMetaUnknown {
// Proceed but warn user that unexpected things can happen.
@@ -1650,12 +1661,12 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// copyOrMove implements copy or move
func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMoveFn, md5, sha1, opName string) (fs.Object, error) {
if err := f.forbidChunk(o, remote); err != nil {
return nil, errors.Wrapf(err, "can't %s", opName)
return nil, fmt.Errorf("can't %s: %w", opName, err)
}
if err := o.readMetadata(ctx); err != nil {
// Refuse to copy/move composite files with invalid or future
// metadata format which might involve unsupported chunk types.
return nil, errors.Wrapf(err, "can't %s this file", opName)
return nil, fmt.Errorf("can't %s this file: %w", opName, err)
}
if !o.isComposite() {
fs.Debugf(o, "%s non-chunked object...", opName)
@@ -2152,7 +2163,7 @@ func (o *Object) UnWrap() fs.Object {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if err := o.readMetadata(ctx); err != nil {
// refuse to open unsupported format
return nil, errors.Wrap(err, "can't open")
return nil, fmt.Errorf("can't open: %w", err)
}
if !o.isComposite() {
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
@@ -2440,7 +2451,7 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
// Be strict about JSON format
// to reduce possibility that a random small file resembles metadata.
if data != nil && len(data) > maxMetadataSizeWritten {
if len(data) > maxMetadataSizeWritten {
return nil, false, ErrMetaTooBig
}
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
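
Aside: the name_format help above says the default "*.rclone_chunk.###" pattern substitutes the base file name for the single '*' and the zero-padded chunk number for the run of '#'. A small self-contained sketch of that substitution; formatChunkName is an illustrative stand-in, not the backend's actual helper.

package main

import (
	"fmt"
	"strings"
)

// formatChunkName replaces '*' with the base name and the run of '#'
// with the chunk number, left-padded with zeros to the number of hashes.
func formatChunkName(format, base string, number int) string {
	digits := strings.Count(format, "#")
	out := strings.Replace(format, strings.Repeat("#", digits), fmt.Sprintf("%0*d", digits, number), 1)
	return strings.Replace(out, "*", base, 1)
}

func main() {
	fmt.Println(formatChunkName("*.rclone_chunk.###", "video.avi", 1)) // video.avi.rclone_chunk.001
}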

View File

@@ -12,6 +12,8 @@ import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
@@ -33,11 +35,35 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
Size: int64(kilobytes) * int64(fs.KibiByte),
Size: int64(kilobytes) * int64(fs.Kibi),
})
})
}
type settings map[string]interface{}
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
configMap := configmap.Simple{}
for key, val := range opts {
configMap[key] = fmt.Sprintf("%v", val)
}
rpath := fspath.JoinRootPath(f.Root(), path)
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), rpath)
fixFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)
return fixFs
}
var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: mtime1}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}
// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
saveOpt := f.opt
@@ -617,22 +643,13 @@ func testMetadataInput(t *testing.T, f *Fs) {
}()
f.opt.FailHard = false
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}
runSubtest := func(contents, name string) {
description := fmt.Sprintf("file with %s metadata", name)
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)
part := testPutFile(ctx, t, f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = testPutFile(ctx, t, f, filename, contents, "upload "+description, false)
obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err, "access "+description)
@@ -678,7 +695,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
// Test that chunker refuses to change on objects with future/unknown metadata
func testFutureProof(t *testing.T, f *Fs) {
if f.opt.MetaFormat == "none" {
if !f.useMeta {
t.Skip("this test requires metadata support")
}
@@ -844,6 +861,44 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
_ = operations.Purge(ctx, f.base, dir)
}
// Test that md5all creates metadata even for small files
func testMD5AllSlow(t *testing.T, f *Fs) {
ctx := context.Background()
fsResult := deriveFs(ctx, t, f, "md5all", settings{
"chunk_size": "1P",
"name_format": "*.#",
"hash_type": "md5all",
"transactions": "rename",
"meta_format": "simplejson",
})
chunkFs, ok := fsResult.(*Fs)
require.True(t, ok, "fs must be a chunker remote")
baseFs := chunkFs.base
if !baseFs.Features().SlowHash {
t.Skipf("this test needs a base fs with slow hash, e.g. local")
}
assert.True(t, chunkFs.useMD5, "must use md5")
assert.True(t, chunkFs.hashAll, "must hash all files")
_ = testPutFile(ctx, t, chunkFs, "file", "-", "error", true)
obj, err := chunkFs.NewObject(ctx, "file")
require.NoError(t, err)
sum, err := obj.Hash(ctx, hash.MD5)
assert.NoError(t, err)
assert.Equal(t, "336d5ebc5436534e61d16e63ddfca327", sum)
list, err := baseFs.List(ctx, "")
require.NoError(t, err)
assert.Equal(t, 2, len(list))
_, err = baseFs.NewObject(ctx, "file")
assert.NoError(t, err, "metadata must be created")
_, err = baseFs.NewObject(ctx, "file.1")
assert.NoError(t, err, "first chunk must be created")
require.NoError(t, operations.Purge(ctx, baseFs, ""))
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
@@ -876,6 +931,9 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("ChunkerServerSideMove", func(t *testing.T) {
testChunkerServerSideMove(t, f)
})
t.Run("MD5AllSlow", func(t *testing.T) {
testMD5AllSlow(t, f)
})
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -10,6 +10,7 @@ import (
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -21,7 +22,6 @@ import (
"github.com/buengese/sgzip"
"github.com/gabriel-vasile/mimetype"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunkedreader"
@@ -36,7 +36,7 @@ import (
// Globals
const (
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
maxChunkSize = 8388608 // at 256KB and 8 MB.
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
bufferSize = 8388608
heuristicBytes = 1048576
@@ -83,23 +83,23 @@ func init() {
Name: "level",
Help: `GZIP compression level (-2 to 9).
Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compressiong at the cost of speed.. Going past 6
generally offers very little return.
Level -2 uses Huffmann encoding only. Only use if you now what you
are doing
Level 0 turns off compression.`,
Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.
Level -2 uses Huffman encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.`,
Default: sgzip.DefaultCompression,
Advanced: true,
}, {
Name: "ram_cache_limit",
Help: `Some remotes don't allow the upload of files with unknown size.
In this case the compressed file will need to be cached to determine
it's size.
Files smaller than this limit will be cached in RAM, file larger than
this limit will be cached on disk`,
In this case the compressed file will need to be cached to determine
its size.
Files smaller than this limit will be cached in RAM, files larger than
this limit will be cached on disk.`,
Default: fs.SizeSuffix(20 * 1024 * 1024),
Advanced: true,
}},
@@ -143,7 +143,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
}
// Strip trailing slashes if they exist in rpath
@@ -158,7 +158,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
wrappedFs, err = wInfo.NewFs(ctx, wName, remotePath, wConfig)
}
if err != nil && err != fs.ErrorIsFile {
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
}
// Create the wrapping fs
@@ -304,7 +304,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
case fs.Directory:
f.addDir(&newEntries, x)
default:
return nil, errors.Errorf("Unknown object type %T", entry)
return nil, fmt.Errorf("Unknown object type %T", entry)
}
}
return newEntries, nil
@@ -401,6 +401,10 @@ func isCompressible(r io.Reader) (bool, error) {
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}
@@ -410,7 +414,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
srcHash := hasher.Sums()[ht]
dstHash, err := o.Hash(ctx, ht)
if err != nil {
return errors.Wrap(err, "failed to read destination hash")
return fmt.Errorf("failed to read destination hash: %w", err)
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
@@ -418,7 +422,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return errors.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
}
return nil
}
@@ -462,10 +466,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
_ = os.Remove(tempFile.Name())
}()
if err != nil {
return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
return nil, fmt.Errorf("Failed to create temporary local FS to spool file: %w", err)
}
if _, err = io.Copy(tempFile, in); err != nil {
return nil, errors.Wrap(err, "Failed to write temporary local file")
return nil, fmt.Errorf("Failed to write temporary local file: %w", err)
}
if _, err = tempFile.Seek(0, 0); err != nil {
return nil, err
@@ -626,9 +630,11 @@ func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.Objec
// Put the data
mo, err = put(ctx, metaReader, f.wrapInfo(src, makeMetadataName(src.Remote()), int64(len(data))), options...)
if err != nil {
removeErr := mo.Remove(ctx)
if removeErr != nil {
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
if mo != nil {
removeErr := mo.Remove(ctx)
if removeErr != nil {
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
}
}
return nil, err
}
@@ -714,7 +720,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
err = oldObj.(*Object).Object.Remove(ctx)
if err != nil {
return nil, errors.Wrap(err, "Could remove original object")
return nil, fmt.Errorf("Could remove original object: %w", err)
}
}
@@ -723,7 +729,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if compressible {
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
if err != nil {
return nil, errors.Wrap(err, "Couldn't rename streamed Object.")
return nil, fmt.Errorf("Couldn't rename streamed Object.: %w", err)
}
newObj.Object = wrapObj
}
@@ -1260,7 +1266,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
return o.Object.Open(ctx, options...)
}
// Get offset and limit from OpenOptions, pass the rest to the underlying remote
var openOptions []fs.OpenOption = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
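
Aside: the isCompressible hunk above adds a w.Close() before the ratio is computed, because a gzip writer buffers output until it is flushed or closed, so measuring b.Len() earlier under-counts the compressed size. A self-contained sketch of the heuristic using the standard compress/gzip package; the sample size and threshold are illustrative, not the backend's constants.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

// isCompressible gzips up to 1 MiB of r and reports whether the
// input/output size ratio exceeds minRatio.
func isCompressible(r io.Reader, minRatio float64) (bool, error) {
	var b bytes.Buffer
	w, err := gzip.NewWriterLevel(&b, gzip.BestSpeed)
	if err != nil {
		return false, err
	}
	n, err := io.CopyN(w, r, 1<<20)
	if err != nil && err != io.EOF {
		return false, err
	}
	// Close (not just Flush) so every compressed byte reaches b before measuring.
	if err := w.Close(); err != nil {
		return false, err
	}
	ratio := float64(n) / float64(b.Len())
	return ratio > minRatio, nil
}

func main() {
	ok, err := isCompressible(strings.NewReader(strings.Repeat("compressible text ", 4096)), 1.1)
	fmt.Println(ok, err) // true <nil>
}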

View File

@@ -7,6 +7,8 @@ import (
gocipher "crypto/cipher"
"crypto/rand"
"encoding/base32"
"encoding/base64"
"errors"
"fmt"
"io"
"strconv"
@@ -15,7 +17,7 @@ import (
"time"
"unicode/utf8"
"github.com/pkg/errors"
"github.com/Max-Sum/base32768"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -94,12 +96,12 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
case "obfuscate":
mode = NameEncryptionObfuscated
default:
err = errors.Errorf("Unknown file name encryption mode %q", s)
err = fmt.Errorf("Unknown file name encryption mode %q", s)
}
return mode, err
}
// String turns mode into a human readable string
// String turns mode into a human-readable string
func (mode NameEncryptionMode) String() (out string) {
switch mode {
case NameEncryptionOff:
@@ -114,6 +116,57 @@ func (mode NameEncryptionMode) String() (out string) {
return out
}
// fileNameEncoding are the encoding methods dealing with encrypted file names
type fileNameEncoding interface {
EncodeToString(src []byte) string
DecodeString(s string) ([]byte, error)
}
// caseInsensitiveBase32Encoding defines a file name encoding
// using a modified version of standard base32 as described in
// RFC4648
//
// The standard encoding is modified in two ways
// * it becomes lower case (no-one likes upper case filenames!)
// * we strip the padding character `=`
type caseInsensitiveBase32Encoding struct{}
// EncodeToString encodes a string using the modified version of
// base32 encoding.
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
encoded := base32.HexEncoding.EncodeToString(src)
encoded = strings.TrimRight(encoded, "=")
return strings.ToLower(encoded)
}
// DecodeString decodes a string as encoded by EncodeToString
func (caseInsensitiveBase32Encoding) DecodeString(s string) ([]byte, error) {
if strings.HasSuffix(s, "=") {
return nil, ErrorBadBase32Encoding
}
// First figure out how many padding characters to add
roundUpToMultipleOf8 := (len(s) + 7) &^ 7
equals := roundUpToMultipleOf8 - len(s)
s = strings.ToUpper(s) + "========"[:equals]
return base32.HexEncoding.DecodeString(s)
}
// NewNameEncoding creates a NameEncoding from a string
func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
s = strings.ToLower(s)
switch s {
case "base32":
enc = caseInsensitiveBase32Encoding{}
case "base64":
enc = base64.RawURLEncoding
case "base32768":
enc = base32768.SafeEncoding
default:
err = fmt.Errorf("Unknown file name encoding mode %q", s)
}
return enc, err
}
// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
dataKey [32]byte // Key for secretbox
@@ -121,15 +174,17 @@ type Cipher struct {
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
}
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
c := &Cipher{
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
}
@@ -187,30 +242,6 @@ func (c *Cipher) putBlock(buf []byte) {
c.buffers.Put(buf)
}
// encodeFileName encodes a filename using a modified version of
// standard base32 as described in RFC4648
//
// The standard encoding is modified in two ways
// * it becomes lower case (no-one likes upper case filenames!)
// * we strip the padding character `=`
func encodeFileName(in []byte) string {
encoded := base32.HexEncoding.EncodeToString(in)
encoded = strings.TrimRight(encoded, "=")
return strings.ToLower(encoded)
}
// decodeFileName decodes a filename as encoded by encodeFileName
func decodeFileName(in string) ([]byte, error) {
if strings.HasSuffix(in, "=") {
return nil, ErrorBadBase32Encoding
}
// First figure out how many padding characters to add
roundUpToMultipleOf8 := (len(in) + 7) &^ 7
equals := roundUpToMultipleOf8 - len(in)
in = strings.ToUpper(in) + "========"[:equals]
return base32.HexEncoding.DecodeString(in)
}
// encryptSegment encrypts a path segment
//
// This uses EME with AES
@@ -231,7 +262,7 @@ func (c *Cipher) encryptSegment(plaintext string) string {
}
paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext))
ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt)
return encodeFileName(ciphertext)
return c.fileNameEnc.EncodeToString(ciphertext)
}
// decryptSegment decrypts a path segment
@@ -239,7 +270,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
rawCiphertext, err := decodeFileName(ciphertext)
rawCiphertext, err := c.fileNameEnc.DecodeString(ciphertext)
if err != nil {
return "", err
}
@@ -580,7 +611,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
func (n *nonce) fromReader(in io.Reader) error {
read, err := io.ReadFull(in, (*n)[:])
if read != fileNonceSize {
return errors.Wrap(err, "short read of nonce")
return fmt.Errorf("short read of nonce: %w", err)
}
return nil
}
@@ -956,7 +987,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Re-open the underlying object with the offset given
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
if err != nil {
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
return 0, fh.finish(fmt.Errorf("couldn't reopen file with offset and limit: %w", err))
}
// Set the file handle
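
Aside: the caseInsensitiveBase32Encoding introduced above is base32 with the extended-hex alphabet (base32.HexEncoding), lower-cased and with the '=' padding stripped on encode, then re-padded and upper-cased again before decoding. A self-contained sketch of the same round trip using only the standard library; it shows the idea, not the backend's actual type.

package main

import (
	"encoding/base32"
	"fmt"
	"strings"
)

// encodeName lower-cases the extended-hex base32 encoding and drops '=' padding.
func encodeName(src []byte) string {
	s := base32.HexEncoding.EncodeToString(src)
	return strings.ToLower(strings.TrimRight(s, "="))
}

// decodeName re-pads to a multiple of 8, upper-cases, and decodes.
func decodeName(s string) ([]byte, error) {
	pad := (8 - len(s)%8) % 8
	return base32.HexEncoding.DecodeString(strings.ToUpper(s) + strings.Repeat("=", pad))
}

func main() {
	enc := encodeName([]byte("1234567890"))
	dec, err := decodeName(enc)
	fmt.Printf("%s -> %q %v\n", enc, dec, err)
}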

View File

@@ -4,13 +4,15 @@ import (
"bytes"
"context"
"encoding/base32"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
"github.com/pkg/errors"
"github.com/Max-Sum/base32768"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
@@ -45,11 +47,31 @@ func TestNewNameEncryptionModeString(t *testing.T) {
assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
}
func TestEncodeFileName(t *testing.T) {
for _, test := range []struct {
in string
expected string
}{
type EncodingTestCase struct {
in string
expected string
}
func testEncodeFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
for _, test := range testCases {
enc, err := NewNameEncoding(encoding)
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
actual := enc.EncodeToString([]byte(test.in))
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
recovered, err := enc.DecodeString(test.expected)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
if caseInsensitive {
in := strings.ToUpper(test.expected)
recovered, err = enc.DecodeString(in)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
}
}
}
func TestEncodeFileNameBase32(t *testing.T) {
testEncodeFileName(t, "base32", []EncodingTestCase{
{"", ""},
{"1", "64"},
{"12", "64p0"},
@@ -67,20 +89,56 @@ func TestEncodeFileName(t *testing.T) {
{"12345678901234", "64p36d1l6orjge9g64p36d0"},
{"123456789012345", "64p36d1l6orjge9g64p36d1l"},
{"1234567890123456", "64p36d1l6orjge9g64p36d1l6o"},
} {
actual := encodeFileName([]byte(test.in))
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
recovered, err := decodeFileName(test.expected)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
in := strings.ToUpper(test.expected)
recovered, err = decodeFileName(in)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
}
}, true)
}
func TestDecodeFileName(t *testing.T) {
func TestEncodeFileNameBase64(t *testing.T) {
testEncodeFileName(t, "base64", []EncodingTestCase{
{"", ""},
{"1", "MQ"},
{"12", "MTI"},
{"123", "MTIz"},
{"1234", "MTIzNA"},
{"12345", "MTIzNDU"},
{"123456", "MTIzNDU2"},
{"1234567", "MTIzNDU2Nw"},
{"12345678", "MTIzNDU2Nzg"},
{"123456789", "MTIzNDU2Nzg5"},
{"1234567890", "MTIzNDU2Nzg5MA"},
{"12345678901", "MTIzNDU2Nzg5MDE"},
{"123456789012", "MTIzNDU2Nzg5MDEy"},
{"1234567890123", "MTIzNDU2Nzg5MDEyMw"},
{"12345678901234", "MTIzNDU2Nzg5MDEyMzQ"},
{"123456789012345", "MTIzNDU2Nzg5MDEyMzQ1"},
{"1234567890123456", "MTIzNDU2Nzg5MDEyMzQ1Ng"},
}, false)
}
func TestEncodeFileNameBase32768(t *testing.T) {
testEncodeFileName(t, "base32768", []EncodingTestCase{
{"", ""},
{"1", "㼿"},
{"12", "㻙ɟ"},
{"123", "㻙ⲿ"},
{"1234", "㻙ⲍƟ"},
{"12345", "㻙ⲍ⍟"},
{"123456", "㻙ⲍ⍆ʏ"},
{"1234567", "㻙ⲍ⍆觟"},
{"12345678", "㻙ⲍ⍆觓ɧ"},
{"123456789", "㻙ⲍ⍆觓栯"},
{"1234567890", "㻙ⲍ⍆觓栩ɣ"},
{"12345678901", "㻙ⲍ⍆觓栩朧"},
{"123456789012", "㻙ⲍ⍆觓栩朤ʅ"},
{"1234567890123", "㻙ⲍ⍆觓栩朤談"},
{"12345678901234", "㻙ⲍ⍆觓栩朤諆ɔ"},
{"123456789012345", "㻙ⲍ⍆觓栩朤諆媕"},
{"1234567890123456", "㻙ⲍ⍆觓栩朤諆媕䆿"},
}, false)
}
func TestDecodeFileNameBase32(t *testing.T) {
enc, err := NewNameEncoding("base32")
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
// We've tested decoding the valid ones above, now concentrate on the invalid ones
for _, test := range []struct {
in string
@@ -90,17 +148,65 @@ func TestDecodeFileName(t *testing.T) {
{"!", base32.CorruptInputError(0)},
{"hello=hello", base32.CorruptInputError(5)},
} {
actual, actualErr := decodeFileName(test.in)
actual, actualErr := enc.DecodeString(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func TestEncryptSegment(t *testing.T) {
c, _ := newCipher(NameEncryptionStandard, "", "", true)
func TestDecodeFileNameBase64(t *testing.T) {
enc, err := NewNameEncoding("base64")
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
// We've tested decoding the valid ones above, now concentrate on the invalid ones
for _, test := range []struct {
in string
expected string
in string
expectedErr error
}{
{"64=", base64.CorruptInputError(2)},
{"!", base64.CorruptInputError(0)},
{"Hello=Hello", base64.CorruptInputError(5)},
} {
actual, actualErr := enc.DecodeString(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func TestDecodeFileNameBase32768(t *testing.T) {
enc, err := NewNameEncoding("base32768")
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
// We've tested decoding the valid ones above, now concentrate on the invalid ones
for _, test := range []struct {
in string
expectedErr error
}{
{"㼿c", base32768.CorruptInputError(1)},
{"!", base32768.CorruptInputError(0)},
{"㻙ⲿ=㻙ⲿ", base32768.CorruptInputError(2)},
} {
actual, actualErr := enc.DecodeString(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func testEncryptSegment(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range testCases {
actual := c.encryptSegment(test.in)
assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
recovered, err := c.decryptSegment(test.expected)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
if caseInsensitive {
in := strings.ToUpper(test.expected)
recovered, err = c.decryptSegment(in)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
}
}
}
func TestEncryptSegmentBase32(t *testing.T) {
testEncryptSegment(t, "base32", []EncodingTestCase{
{"", ""},
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"12", "l42g6771hnv3an9cgc8cr2n1ng"},
@@ -118,26 +224,61 @@ func TestEncryptSegment(t *testing.T) {
{"12345678901234", "moq0uqdlqrblrc5pa5u5c7hq9g"},
{"123456789012345", "eeam3li4rnommi3a762h5n7meg"},
{"1234567890123456", "mijbj0frqf6ms7frcr6bd9h0env53jv96pjaaoirk7forcgpt70g"},
} {
actual := c.encryptSegment(test.in)
assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
recovered, err := c.decryptSegment(test.expected)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
in := strings.ToUpper(test.expected)
recovered, err = c.decryptSegment(in)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
}
}, true)
}
func TestDecryptSegment(t *testing.T) {
func TestEncryptSegmentBase64(t *testing.T) {
testEncryptSegment(t, "base64", []EncodingTestCase{
{"", ""},
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"12", "qQUDHOGN_jVdLIMQzYrhvA"},
{"123", "1CxFf2Mti1xIPYlGruDh-A"},
{"1234", "RL-xOTmsxsG7kuTy2XJUxw"},
{"12345", "3FP_GHoeBJdq0yLgaED8IQ"},
{"123456", "Xc4T1Gqrs3OVYnrE6dpEWQ"},
{"1234567", "uZeEzssOnDWHEOzLqjwpog"},
{"12345678", "8noiTP5WkkbEuijsPhOpxQ"},
{"123456789", "GeNxgLA0wiaGAKU3U7qL4Q"},
{"1234567890", "x1DUhdmqoVWYVBLD3dha-A"},
{"12345678901", "iEyP_3BZR6vvv_2WM6NbZw"},
{"123456789012", "4OPGvS4SZdjvS568APUaFw"},
{"1234567890123", "Y8c5Wr8OhYYUo7fPwdojdg"},
{"12345678901234", "tjQPabXW112wuVF8Vh46TA"},
{"123456789012345", "c5Vh1kTd8WtIajmFEtz2dA"},
{"1234567890123456", "tKa5gfvTzW4d-2bMtqYgdf5Rz-k2ZqViW6HfjbIZ6cE"},
}, false)
}
func TestEncryptSegmentBase32768(t *testing.T) {
testEncryptSegment(t, "base32768", []EncodingTestCase{
{"", ""},
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"12", "竢朧䉱虃光塬䟛⣡蓟"},
{"123", "遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
{"1234", "䢟銮䵵狌㐜燳谒颴詟"},
{"12345", "钉Ꞇ㖃蚩憶狫朰杜㜿"},
{"123456", "啇ᚵⵕ憗䋫➫➓肤卟"},
{"1234567", "茫螓翁連劘樓㶔抉矟"},
{"12345678", "龝☳䘊辄岅較络㧩襟"},
{"123456789", "ⲱ苀㱆犂媐Ꮤ锇惫靟"},
{"1234567890", "計宁憕偵匢皫╛纺ꌟ"},
{"12345678901", "檆䨿鑫㪺藝ꡖ勇䦛婟"},
{"123456789012", "雑頏䰂䲝淚哚鹡魺⪟"},
{"1234567890123", "塃璶繁躸圅㔟䗃肃懟"},
{"12345678901234", "腺ᕚ崚鏕鏥讥鼌䑺䲿"},
{"123456789012345", "怪绕滻蕶肣但⠥荖惟"},
{"1234567890123456", "肳哀旚挶靏鏻㾭䱠慟㪳ꏆ賊兲铧敻塹魀ʟ"},
}, false)
}
func TestDecryptSegmentBase32(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 3328)
for i := range longName {
longName[i] = 'a'
}
c, _ := newCipher(NameEncryptionStandard, "", "", true)
enc, _ := NewNameEncoding("base32")
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range []struct {
in string
expectedErr error
@@ -145,118 +286,371 @@ func TestDecryptSegment(t *testing.T) {
{"64=", ErrorBadBase32Encoding},
{"!", base32.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
} {
actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func TestEncryptFileName(t *testing.T) {
func TestDecryptSegmentBase64(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 2816)
for i := range longName {
longName[i] = 'a'
}
enc, _ := NewNameEncoding("base64")
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range []struct {
in string
expectedErr error
}{
{"6H=", base64.CorruptInputError(2)},
{"!", base64.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
} {
actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func TestDecryptSegmentBase32768(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := strings.Repeat("怪", 1280)
enc, _ := NewNameEncoding("base32768")
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range []struct {
in string
expectedErr error
}{
{"怪=", base32768.CorruptInputError(1)},
{"!", base32768.CorruptInputError(0)},
{longName, ErrorTooLongAfterDecode},
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
} {
actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func testStandardEncryptFileName(t *testing.T, encoding string, testCasesEncryptDir []EncodingTestCase, testCasesNoEncryptDir []EncodingTestCase) {
// First standard mode
c, _ := newCipher(NameEncryptionStandard, "", "", true)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range testCasesEncryptDir {
assert.Equal(t, test.expected, c.EncryptFileName(test.in))
}
// Standard mode with directory name encryption off
c, _ = newCipher(NameEncryptionStandard, "", "", false)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
// Now off mode
c, _ = newCipher(NameEncryptionOff, "", "", true)
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
for _, test := range testCasesNoEncryptDir {
assert.Equal(t, test.expected, c.EncryptFileName(test.in))
}
}
func TestStandardEncryptFileNameBase32(t *testing.T) {
testStandardEncryptFileName(t, "base32", []EncodingTestCase{
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
}, []EncodingTestCase{
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"1/12", "1/l42g6771hnv3an9cgc8cr2n1ng"},
{"1/12/123", "1/12/qgm4avr35m5loi1th53ato71v0"},
{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
})
}
func TestStandardEncryptFileNameBase64(t *testing.T) {
testStandardEncryptFileName(t, "base64", []EncodingTestCase{
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
}, []EncodingTestCase{
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"1/12", "1/qQUDHOGN_jVdLIMQzYrhvA"},
{"1/12/123", "1/12/1CxFf2Mti1xIPYlGruDh-A"},
{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "1/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
})
}
func TestStandardEncryptFileNameBase32768(t *testing.T) {
testStandardEncryptFileName(t, "base32768", []EncodingTestCase{
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
}, []EncodingTestCase{
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"1/12", "1/竢朧䉱虃光塬䟛⣡蓟"},
{"1/12/123", "1/12/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "1/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
})
}
func TestNonStandardEncryptFileName(t *testing.T) {
// Off mode
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
// Obfuscation mode
c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
// Obfuscation mode with directory name encryption off
c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
c, _ = newCipher(NameEncryptionObfuscated, "", "", false, nil)
assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
}
func TestDecryptFileName(t *testing.T) {
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
in string
expected string
expectedErr error
}{
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
func testStandardDecryptFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
enc, _ := NewNameEncoding(encoding)
for _, test := range testCases {
// Test when dirNameEncrypt=true
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
assert.Equal(t, test.expectedErr, actualErr, what)
assert.NoError(t, actualErr)
assert.Equal(t, test.expected, actual)
if caseInsensitive {
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
actual, actualErr := c.DecryptFileName(strings.ToUpper(test.in))
assert.NoError(t, actualErr)
assert.Equal(t, test.expected, actual)
}
// Add a character should raise ErrorNotAMultipleOfBlocksize
actual, actualErr = c.DecryptFileName(enc.EncodeToString([]byte("1")) + test.in)
assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
assert.Equal(t, "", actual)
// Test when dirNameEncrypt=false
noDirEncryptIn := test.in
if strings.LastIndex(test.expected, "/") != -1 {
noDirEncryptIn = test.expected[:strings.LastIndex(test.expected, "/")] + test.in[strings.LastIndex(test.in, "/"):]
}
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
actual, actualErr = c.DecryptFileName(noDirEncryptIn)
assert.NoError(t, actualErr)
assert.Equal(t, test.expected, actual)
}
}
func TestStandardDecryptFileNameBase32(t *testing.T) {
testStandardDecryptFileName(t, "base32", []EncodingTestCase{
{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
}, true)
}
func TestStandardDecryptFileNameBase64(t *testing.T) {
testStandardDecryptFileName(t, "base64", []EncodingTestCase{
{"yBxRX25ypgUVyj8MSxJnFw", "1"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
}, false)
}
func TestStandardDecryptFileNameBase32768(t *testing.T) {
testStandardDecryptFileName(t, "base32768", []EncodingTestCase{
{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
}, false)
}
func TestNonStandardDecryptFileName(t *testing.T) {
for _, encoding := range []string{"base32", "base64", "base32768"} {
enc, _ := NewNameEncoding(encoding)
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
in string
expected string
expectedErr error
}{
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
assert.Equal(t, test.expectedErr, actualErr, what)
}
}
}
func TestEncDecMatches(t *testing.T) {
for _, test := range []struct {
mode NameEncryptionMode
in string
}{
{NameEncryptionStandard, "1/2/3/4"},
{NameEncryptionOff, "1/2/3/4"},
{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
{NameEncryptionObfuscated, "Avatar The Last Airbender"},
} {
c, _ := newCipher(test.mode, "", "", true)
out, err := c.DecryptFileName(c.EncryptFileName(test.in))
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, out, test.in, what)
assert.Equal(t, err, nil, what)
for _, encoding := range []string{"base32", "base64", "base32768"} {
enc, _ := NewNameEncoding(encoding)
for _, test := range []struct {
mode NameEncryptionMode
in string
}{
{NameEncryptionStandard, "1/2/3/4"},
{NameEncryptionOff, "1/2/3/4"},
{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
{NameEncryptionObfuscated, "Avatar The Last Airbender"},
} {
c, _ := newCipher(test.mode, "", "", true, enc)
out, err := c.DecryptFileName(c.EncryptFileName(test.in))
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, out, test.in, what)
assert.Equal(t, err, nil, what)
}
}
}
func TestEncryptDirName(t *testing.T) {
func testStandardEncryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase) {
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
// First standard mode
c, _ := newCipher(NameEncryptionStandard, "", "", true)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptDirName("1"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptDirName("1/12"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptDirName("1/12/123"))
// Standard mode with dir name encryption off
c, _ = newCipher(NameEncryptionStandard, "", "", false)
assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
// Now off mode
c, _ = newCipher(NameEncryptionOff, "", "", true)
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
for _, test := range testCases {
assert.Equal(t, test.expected, c.EncryptDirName(test.in))
}
}
func TestDecryptDirName(t *testing.T) {
func TestStandardEncryptDirNameBase32(t *testing.T) {
testStandardEncryptDirName(t, "base32", []EncodingTestCase{
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
})
}
func TestStandardEncryptDirNameBase64(t *testing.T) {
testStandardEncryptDirName(t, "base64", []EncodingTestCase{
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
})
}
func TestStandardEncryptDirNameBase32768(t *testing.T) {
testStandardEncryptDirName(t, "base32768", []EncodingTestCase{
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
})
}
func TestNonStandardEncryptDirName(t *testing.T) {
for _, encoding := range []string{"base32", "base64", "base32768"} {
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", false, enc)
assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
// Now off mode
c, _ = newCipher(NameEncryptionOff, "", "", true, enc)
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
}
}
func testStandardDecryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
enc, _ := NewNameEncoding(encoding)
for _, test := range testCases {
// Test dirNameEncrypt=true
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
actual, actualErr := c.DecryptDirName(test.in)
assert.Equal(t, test.expected, actual)
assert.NoError(t, actualErr)
if caseInsensitive {
actual, actualErr := c.DecryptDirName(strings.ToUpper(test.in))
assert.Equal(t, actual, test.expected)
assert.NoError(t, actualErr)
}
actual, actualErr = c.DecryptDirName(enc.EncodeToString([]byte("1")) + test.in)
assert.Equal(t, "", actual)
assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
// Test dirNameEncrypt=false
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
actual, actualErr = c.DecryptDirName(test.in)
assert.Equal(t, test.in, actual)
assert.NoError(t, actualErr)
actual, actualErr = c.DecryptDirName(test.expected)
assert.Equal(t, test.expected, actual)
assert.NoError(t, actualErr)
// Test dirNameEncrypt=false
}
}
/*
enc, _ := NewNameEncoding(encoding)
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
in string
expected string
expectedErr error
}{
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
actual, actualErr := c.DecryptDirName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
assert.Equal(t, test.expectedErr, actualErr, what)
}
*/
func TestStandardDecryptDirNameBase32(t *testing.T) {
testStandardDecryptDirName(t, "base32", []EncodingTestCase{
{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
}, true)
}
func TestStandardDecryptDirNameBase64(t *testing.T) {
testStandardDecryptDirName(t, "base64", []EncodingTestCase{
{"yBxRX25ypgUVyj8MSxJnFw", "1"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
}, false)
}
func TestStandardDecryptDirNameBase32768(t *testing.T) {
testStandardDecryptDirName(t, "base32768", []EncodingTestCase{
{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
}, false)
}
func TestNonStandardDecryptDirName(t *testing.T) {
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
@@ -264,18 +658,11 @@ func TestDecryptDirName(t *testing.T) {
expected string
expectedErr error
}{
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123.bin", nil},
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil},
{NameEncryptionOff, true, ".bin", ".bin", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, nil)
actual, actualErr := c.DecryptDirName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
@@ -284,7 +671,7 @@ func TestDecryptDirName(t *testing.T) {
}
func TestEncryptedSize(t *testing.T) {
c, _ := newCipher(NameEncryptionStandard, "", "", true)
c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
for _, test := range []struct {
in int64
expected int64
@@ -308,7 +695,7 @@ func TestEncryptedSize(t *testing.T) {
func TestDecryptedSize(t *testing.T) {
// Test the errors since we tested the reverse above
c, _ := newCipher(NameEncryptionStandard, "", "", true)
c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
for _, test := range []struct {
in int64
expectedErr error
@@ -637,7 +1024,7 @@ func (r *randomSource) Read(p []byte) (n int, err error) {
func (r *randomSource) Write(p []byte) (n int, err error) {
for i := range p {
if p[i] != r.next() {
return 0, errors.Errorf("Error in stream at %d", r.counter)
return 0, fmt.Errorf("Error in stream at %d", r.counter)
}
}
return len(p), nil
@@ -679,7 +1066,7 @@ func (z *zeroes) Read(p []byte) (n int, err error) {
// Test encrypt decrypt with different buffer sizes
func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = &zeroes{} // zero out the nonce
buf := make([]byte, bufSize)
@@ -749,7 +1136,7 @@ func TestEncryptData(t *testing.T) {
{[]byte{1}, file1},
{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, file16},
} {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
@@ -772,7 +1159,7 @@ func TestEncryptData(t *testing.T) {
}
func TestNewEncrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
@@ -788,13 +1175,12 @@ func TestNewEncrypter(t *testing.T) {
fh, err = c.newEncrypter(z, nil)
assert.Nil(t, fh)
assert.Error(t, err, "short read of nonce")
}
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
// cause a fatal loop
func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
@@ -823,7 +1209,7 @@ func (c *closeDetector) Close() error {
}
func TestNewDecrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
@@ -866,7 +1252,7 @@ func TestNewDecrypter(t *testing.T) {
// Test the stream returning 0, io.ErrUnexpectedEOF
func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
@@ -882,7 +1268,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
}
func TestNewDecrypterSeekLimit(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = &zeroes{} // nodge the crypto rand generator
@@ -1088,7 +1474,7 @@ func TestDecrypterCalculateUnderlying(t *testing.T) {
}
func TestDecrypterRead(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
// Test truncating the file at each possible point
@@ -1152,7 +1538,7 @@ func TestDecrypterRead(t *testing.T) {
}
func TestDecrypterClose(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
cd := newCloseDetector(bytes.NewBuffer(file16))
@@ -1190,7 +1576,7 @@ func TestDecrypterClose(t *testing.T) {
}
func TestPutGetBlock(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
block := c.getBlock()
@@ -1201,7 +1587,7 @@ func TestPutGetBlock(t *testing.T) {
}
func TestKey(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
// Check zero keys OK


@@ -3,13 +3,13 @@ package crypt
import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -30,7 +30,7 @@ func init() {
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "filename_encryption",
@@ -39,13 +39,13 @@ func init() {
Examples: []fs.OptionExample{
{
Value: "standard",
Help: "Encrypt the filenames see the docs for the details.",
Help: "Encrypt the filenames.\nSee the docs for the details.",
}, {
Value: "obfuscate",
Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
},
},
}, {
@@ -71,7 +71,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
Required: true,
}, {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
Help: "Password or pass phrase for salt.\n\nOptional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "server_side_across_configs",
@@ -116,6 +116,29 @@ names, or for debugging purposes.`,
Help: "Encrypt file data.",
},
},
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string.
This option could help with shortening the encrypted filename. The
suitable option would depend on the way your remote counts the filename
length and if it's case sensitive.`,
Default: "base32",
Examples: []fs.OptionExample{
{
Value: "base32",
Help: "Encode using base32. Suitable for all remotes.",
},
{
Value: "base64",
Help: "Encode using base64. Suitable for case sensitive remotes.",
},
{
Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoints instead of UTF-8 byte length. (E.g. OneDrive)",
},
},
Advanced: true,
}},
})
}
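A rough feel for why the encoding choice shortens names, as a standalone sketch using only the standard library: the alphabets below are stdlib defaults rather than the exact variants crypt uses, and base32768 has no stdlib equivalent, but since it packs 15 bits per character the same 16-byte block fits in about 9 characters, which matches the base32768 test vectors earlier in this diff.

package main

import (
	"encoding/base32"
	"encoding/base64"
	"fmt"
)

func main() {
	block := make([]byte, 16) // one padded and encrypted filename block
	fmt.Println(len(base32.HexEncoding.WithPadding(base32.NoPadding).EncodeToString(block))) // 26 characters
	fmt.Println(len(base64.RawURLEncoding.EncodeToString(block)))                            // 22 characters
}

So a remote that limits names by character count gains the most from base32768, while base64 is a middle ground for case sensitive remotes.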
@@ -131,18 +154,22 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
}
password, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password")
return nil, fmt.Errorf("failed to decrypt password: %w", err)
}
var salt string
if opt.Password2 != "" {
salt, err = obscure.Reveal(opt.Password2)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password2")
return nil, fmt.Errorf("failed to decrypt password2: %w", err)
}
}
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
enc, err := NewNameEncoding(opt.FilenameEncoding)
if err != nil {
return nil, errors.Wrap(err, "failed to make cipher")
return nil, err
}
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption, enc)
if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err)
}
return cipher, nil
}
@@ -192,7 +219,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
}
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
}
f := &Fs{
Fs: wrappedFs,
@@ -229,6 +256,7 @@ type Options struct {
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
FilenameEncoding string `config:"filename_encoding"`
}
// Fs represents a wrapped fs.Fs
@@ -300,7 +328,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
case fs.Directory:
f.addDir(ctx, &newEntries, x)
default:
return nil, errors.Errorf("Unknown object type %T", entry)
return nil, fmt.Errorf("Unknown object type %T", entry)
}
}
return newEntries, nil
@@ -363,7 +391,11 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
if f.opt.NoDataEncryption {
return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
if err == nil && o != nil {
o = f.newObject(o)
}
return o, err
}
// Encrypt the data into wrappedIn
@@ -402,7 +434,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
var dstHash string
dstHash, err = o.Hash(ctx, ht)
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
return nil, fmt.Errorf("failed to read destination hash: %w", err)
}
if srcHash != "" && dstHash != "" {
if srcHash != dstHash {
@@ -411,7 +443,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
@@ -612,24 +644,24 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
// Open the src for input
in, err := src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
return "", fmt.Errorf("failed to open src: %w", err)
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
return "", fmt.Errorf("failed to make encrypter: %w", err)
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
return "", fmt.Errorf("failed to make hasher: %w", err)
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
return "", fmt.Errorf("failed to hash data: %w", err)
}
return m.Sums()[hashType], nil
@@ -648,12 +680,12 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
// use a limited read so we only read the header
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce")
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
}
d, err := f.cipher.newDecrypter(in)
if err != nil {
_ = in.Close()
return "", errors.Wrap(err, "failed to open object to read nonce")
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
}
nonce := d.nonce
// fs.Debugf(o, "Read nonce % 2x", nonce)
@@ -672,7 +704,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
// Close d (and hence in) once we have read the nonce
err = d.Close()
if err != nil {
return "", errors.Wrap(err, "failed to close nonce read")
return "", fmt.Errorf("failed to close nonce read: %w", err)
}
return f.computeHashWithNonce(ctx, nonce, src, hashType)
@@ -791,7 +823,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
for _, encryptedFileName := range arg {
fileName, err := f.DecryptFileName(encryptedFileName)
if err != nil {
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
return out, fmt.Errorf("failed to decrypt: %s: %w", encryptedFileName, err)
}
out = append(out, fileName)
}
@@ -995,6 +1027,9 @@ func (o *ObjectInfo) Size() int64 {
if size < 0 {
return size
}
if o.f.opt.NoDataEncryption {
return size
}
return o.f.cipher.EncryptedSize(size)
}


@@ -29,7 +29,7 @@ func TestIntegration(t *testing.T) {
}
// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
func TestStandardBase32(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
@@ -49,6 +49,48 @@ func TestStandard(t *testing.T) {
})
}
func TestStandardBase64(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
func TestStandardBase32768(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
if *fstest.RemoteName != "" {


@@ -4,7 +4,7 @@
// buffers which are a multiple of an underlying crypto block size.
package pkcs7
import "github.com/pkg/errors"
import "errors"
// Errors Unpad can return
var (

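For reference, the padding these errors guard is plain PKCS#7: append k bytes each of value k, where k = blockSize - len(data)%blockSize, so k is always between 1 and blockSize. Below is a minimal illustrative padder, not the package's actual API; the real pkcs7 package also defines the Unpad errors referenced in the cipher tests above.

package main

import "fmt"

// pad appends PKCS#7 padding so that len(result) is a multiple of blockSize.
// Illustrative sketch only.
func pad(blockSize int, data []byte) []byte {
	k := blockSize - len(data)%blockSize // 1..blockSize, never 0
	for i := 0; i < k; i++ {
		data = append(data, byte(k))
	}
	return data
}

func main() {
	fmt.Printf("% x\n", pad(16, []byte("hello"))) // 5 data bytes followed by 11 bytes of 0x0b
}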
backend/drive/drive.go Executable file → Normal file

@@ -11,10 +11,10 @@ import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"mime"
"net/http"
"path"
@@ -26,13 +26,13 @@ import (
"text/template"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/fspath"
@@ -68,8 +68,8 @@ const (
defaultScope = "drive"
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = 256 * fs.KibiByte
defaultChunkSize = 8 * fs.MebiByte
minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
defaultChunkSize = 8 * fs.Mebi
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
listRGrouping = 50 // number of IDs to search at once when using ListR
listRInputBuffer = 1000 // size of input buffer when using ListR
@@ -84,7 +84,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
RedirectURL: oauthutil.RedirectURL,
}
_mimeTypeToExtensionDuplicates = map[string]string{
"application/x-vnd.oasis.opendocument.presentation": ".odp",
@@ -183,32 +183,71 @@ func init() {
Description: "Google Drive",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
return
return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
}
// Fill in the scopes
driveConfig.Scopes = driveScopes(opt.Scope)
// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
}
switch config.State {
case "":
// Fill in the scopes
driveConfig.Scopes = driveScopes(opt.Scope)
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
err = oauthutil.Config(ctx, "drive", name, m, driveConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
}
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
OAuth2Config: driveConfig,
})
}
return fs.ConfigGoto("teamdrive")
case "teamdrive":
if opt.TeamDriveID == "" {
return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", "Configure this as a Shared Drive (Team Drive)?\n")
}
return fs.ConfigConfirm("teamdrive_change", false, "config_change_team_drive", fmt.Sprintf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID))
case "teamdrive_ok":
if config.Result == "false" {
m.Set("team_drive", "")
return nil, nil
}
return fs.ConfigGoto("teamdrive_config")
case "teamdrive_change":
if config.Result == "false" {
return nil, nil
}
return fs.ConfigGoto("teamdrive_config")
case "teamdrive_config":
f, err := newFs(ctx, name, "", m)
if err != nil {
return nil, fmt.Errorf("failed to make Fs to list Shared Drives: %w", err)
}
teamDrives, err := f.listTeamDrives(ctx)
if err != nil {
return nil, err
}
if len(teamDrives) == 0 {
return fs.ConfigError("", "No Shared Drives found in your account")
}
return fs.ConfigChoose("teamdrive_final", "config_team_drive", "Shared Drive", len(teamDrives), func(i int) (string, string) {
teamDrive := teamDrives[i]
return teamDrive.Id, teamDrive.Name
})
case "teamdrive_final":
driveID := config.Result
m.Set("team_drive", driveID)
m.Set("root_folder_id", "")
opt.TeamDriveID = driveID
opt.RootFolderID = ""
return nil, nil
}
err = configTeamDrive(ctx, opt, m, name)
if err != nil {
log.Fatalf("Failed to configure Shared Drive: %v", err)
}
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(driveOAuthOptions(), []fs.Option{{
Name: "scope",
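For readers following the new interactive config above, the flow through config.State reads, in summary:

// ""                 -> OAuth (skipped when a service account is configured), then "teamdrive"
// "teamdrive"        -> confirm: "teamdrive_ok" for a new setup, "teamdrive_change" if team_drive is already set
// "teamdrive_ok" /
// "teamdrive_change" -> "teamdrive_config" if confirmed, otherwise finish
// "teamdrive_config" -> list the Shared Drives and offer a choice, leading to "teamdrive_final"
// "teamdrive_final"  -> store the chosen team_drive and clear root_folder_id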
@@ -231,7 +270,7 @@ func init() {
}},
}, {
Name: "root_folder_id",
Help: `ID of the root folder
Help: `ID of the root folder.
Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use
@@ -239,15 +278,15 @@ a non root folder as its starting point.
`,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive)",
Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
@@ -258,12 +297,23 @@ a non root folder as its starting point.
}, {
Name: "use_trash",
Default: true,
Help: "Send files to the trash instead of deleting permanently.\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
Help: "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
Advanced: true,
}, {
Name: "copy_shortcut_content",
Default: false,
Help: `Server side copy contents of shortcuts instead of the shortcut.
When doing server side copies, normally rclone will copy shortcuts as
shortcuts.
If this flag is used then rclone will copy the contents of shortcuts
rather than shortcuts themselves when doing server side copies.`,
Advanced: true,
}, {
Name: "skip_gdocs",
Default: false,
Help: "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
Help: "Skip google documents in all listings.\n\nIf given, gdocs practically become invisible to rclone.",
Advanced: true,
}, {
Name: "skip_checksum_gphotos",
@@ -296,7 +346,7 @@ commands (copy, sync, etc.), and with all other commands too.`,
}, {
Name: "trashed_only",
Default: false,
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
Advanced: true,
}, {
Name: "starred_only",
@@ -306,7 +356,7 @@ commands (copy, sync, etc.), and with all other commands too.`,
}, {
Name: "formats",
Default: "",
Help: "Deprecated: see export_formats",
Help: "Deprecated: See export_formats.",
Advanced: true,
Hide: fs.OptionHideConfigurator,
}, {
@@ -322,12 +372,12 @@ commands (copy, sync, etc.), and with all other commands too.`,
}, {
Name: "allow_import_name_change",
Default: false,
Help: "Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.",
Help: "Allow the filetype to change when uploading Google docs.\n\nE.g. file.doc to file.docx. This will confuse sync and reupload every time.",
Advanced: true,
}, {
Name: "use_created_date",
Default: false,
Help: `Use file created date instead of modified date.,
Help: `Use file created date instead of modified date.
Useful when downloading data and you want the creation date used in
place of the last modified date.
@@ -361,7 +411,7 @@ date is used.`,
}, {
Name: "list_chunk",
Default: 1000,
Help: "Size of listing chunk 100-1000. 0 to disable.",
Help: "Size of listing chunk 100-1000, 0 to disable.",
Advanced: true,
}, {
Name: "impersonate",
@@ -371,17 +421,19 @@ date is used.`,
}, {
Name: "alternate_export",
Default: false,
Help: "Deprecated: no longer needed",
Help: "Deprecated: No longer needed.",
Hide: fs.OptionHideBoth,
}, {
Name: "upload_cutoff",
Default: defaultChunkSize,
Help: "Cutoff for switching to chunked upload",
Help: "Cutoff for switching to chunked upload.",
Advanced: true,
}, {
Name: "chunk_size",
Default: defaultChunkSize,
Help: `Upload chunk size. Must a power of 2 >= 256k.
Help: `Upload chunk size.
Must be a power of 2 >= 256k.
Making this larger will improve performance, but note that each chunk
is buffered in memory one per transfer.
@@ -451,7 +503,7 @@ configurations.`,
}, {
Name: "disable_http2",
Default: true,
Help: `Disable drive using http2
Help: `Disable drive using http2.
There is currently an unsolved issue with the google drive backend and
HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
@@ -465,9 +517,9 @@ See: https://github.com/rclone/rclone/issues/3631
}, {
Name: "stop_on_upload_limit",
Default: false,
Help: `Make upload limit errors be fatal
Help: `Make upload limit errors be fatal.
At the time of writing it is only possible to upload 750GB of data to
At the time of writing it is only possible to upload 750 GiB of data to
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
@@ -482,9 +534,9 @@ See: https://github.com/rclone/rclone/issues/3857
}, {
Name: "stop_on_download_limit",
Default: false,
Help: `Make download limit errors be fatal
Help: `Make download limit errors be fatal.
At the time of writing it is only possible to download 10TB of data from
At the time of writing it is only possible to download 10 TiB of data from
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
@@ -496,11 +548,19 @@ Google don't document so it may break in the future.
Advanced: true,
}, {
Name: "skip_shortcuts",
Help: `If set skip shortcut files
Help: `If set skip shortcut files.
Normally rclone dereferences shortcut files making them appear as if
they are the original file (see [the shortcuts section](#shortcuts)).
If this flag is set then rclone will ignore shortcut files completely.
`,
Advanced: true,
Default: false,
}, {
Name: "skip_dangling_shortcuts",
Help: `If set skip dangling shortcut files.
If this is set then rclone will not show any dangling shortcuts in listings.
`,
Advanced: true,
Default: false,
@@ -522,7 +582,7 @@ If this flag is set then rclone will ignore shortcut files completely.
} {
for mimeType, extension := range m {
if err := mime.AddExtensionType(extension, mimeType); err != nil {
log.Fatalf("Failed to register MIME type %q: %v", mimeType, err)
fs.Errorf("Failed to register MIME type %q: %v", mimeType, err)
}
}
}
@@ -537,6 +597,7 @@ type Options struct {
TeamDriveID string `config:"team_drive"`
AuthOwnerOnly bool `config:"auth_owner_only"`
UseTrash bool `config:"use_trash"`
CopyShortcutContent bool `config:"copy_shortcut_content"`
SkipGdocs bool `config:"skip_gdocs"`
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"`
@@ -563,6 +624,7 @@ type Options struct {
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
StopOnDownloadLimit bool `config:"stop_on_download_limit"`
SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -578,6 +640,7 @@ type Fs struct {
client *http.Client // authorized client
rootFolderID string // the id of the root folder
dirCache *dircache.DirCache // Map of directory path to directory id
lastQuery string // Last query string to check in unit tests
pacer *fs.Pacer // To pace the API calls
exportExtensions []string // preferred extensions to download docs
importMimeTypes []string // MIME types to convert to docs
@@ -713,7 +776,7 @@ func (f *Fs) getFile(ctx context.Context, ID string, fields googleapi.Field) (in
func (f *Fs) getRootID(ctx context.Context) (string, error) {
info, err := f.getFile(ctx, "root", "id")
if err != nil {
return "", errors.Wrap(err, "couldn't find root directory ID")
return "", fmt.Errorf("couldn't find root directory ID: %w", err)
}
return info.Id, nil
}
@@ -791,11 +854,31 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if filesOnly {
query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
}
list := f.svc.Files.List()
if len(query) > 0 {
list.Q(strings.Join(query, " and "))
// fmt.Printf("list Query = %q\n", query)
// Constrain query using filter if this remote is a sync/copy/walk source.
if fi, use := filter.GetConfig(ctx), filter.GetUseFilter(ctx); fi != nil && use {
queryByTime := func(op string, tm time.Time) {
if tm.IsZero() {
return
}
// https://developers.google.com/drive/api/v3/ref-search-terms#operators
// Query times use RFC 3339 format, default timezone is UTC
timeStr := tm.UTC().Format("2006-01-02T15:04:05")
term := fmt.Sprintf("(modifiedTime %s '%s' or mimeType = '%s')", op, timeStr, driveFolderType)
query = append(query, term)
}
queryByTime(">=", fi.ModTimeFrom)
queryByTime("<=", fi.ModTimeTo)
}
list := f.svc.Files.List()
queryString := strings.Join(query, " and ")
if queryString != "" {
list.Q(queryString)
// fs.Debugf(f, "list query: %q", queryString)
}
f.lastQuery = queryString // for unit tests
if f.opt.ListChunk > 0 {
list.PageSize(f.opt.ListChunk)
}
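To make the queryByTime addition concrete: with an illustrative ModTimeFrom of 2022-03-01 12:00:00 UTC taken from the active filter, the extra term ANDed into the query would be (modifiedTime >= '2022-03-01T12:00:00' or mimeType = 'application/vnd.google-apps.folder'), the folder alternative (driveFolderType) being what keeps directories in the listing so the walk can still descend into them. The timestamp comes straight from the Format call above:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical filter time; rclone takes this from filter.GetConfig(ctx).
	tm := time.Date(2022, 3, 1, 12, 0, 0, 0, time.UTC)
	fmt.Println(tm.UTC().Format("2006-01-02T15:04:05")) // 2022-03-01T12:00:00
}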
@@ -820,7 +903,7 @@ OUTER:
return f.shouldRetry(ctx, err)
})
if err != nil {
return false, errors.Wrap(err, "couldn't list directory")
return false, fmt.Errorf("couldn't list directory: %w", err)
}
if files.IncompleteSearch {
fs.Errorf(f, "search result INCOMPLETE")
@@ -842,7 +925,12 @@ OUTER:
}
item, err = f.resolveShortcut(ctx, item)
if err != nil {
return false, errors.Wrap(err, "list")
return false, fmt.Errorf("list: %w", err)
}
// leave the dangling shortcut out of the listings
// we've already logged about the dangling shortcut in resolveShortcut
if f.opt.SkipDanglingShortcuts && item.MimeType == shortcutMimeTypeDangling {
continue
}
}
// Check the case of items is correct since
@@ -903,7 +991,7 @@ func fixMimeType(mimeTypeIn string) string {
mimeTypeOut = mime.FormatMediaType(mediaType, param)
}
if mimeTypeOut == "" {
panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
panic(fmt.Errorf("unable to fix MIME type %q", mimeTypeIn))
}
return mimeTypeOut
}
@@ -938,7 +1026,7 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
}
mt := mime.TypeByExtension(extension)
if mt == "" {
return extensions, mimeTypes, errors.Errorf("couldn't find MIME type for extension %q", extension)
return extensions, mimeTypes, fmt.Errorf("couldn't find MIME type for extension %q", extension)
}
if !containsString(extensions, extension) {
extensions = append(extensions, extension)
@@ -949,48 +1037,6 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
return
}
// Figure out if the user wants to use a team drive
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
ci := fs.GetConfig(ctx)
// Stop if we are running non-interactive config
if ci.AutoConfirm {
return nil
}
if opt.TeamDriveID == "" {
fmt.Printf("Configure this as a Shared Drive (Team Drive)?\n")
} else {
fmt.Printf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID)
}
if !config.Confirm(false) {
return nil
}
f, err := newFs(ctx, name, "", m)
if err != nil {
return errors.Wrap(err, "failed to make Fs to list Shared Drives")
}
fmt.Printf("Fetching Shared Drive list...\n")
teamDrives, err := f.listTeamDrives(ctx)
if err != nil {
return err
}
if len(teamDrives) == 0 {
fmt.Printf("No Shared Drives found in your account")
return nil
}
var driveIDs, driveNames []string
for _, teamDrive := range teamDrives {
driveIDs = append(driveIDs, teamDrive.Id)
driveNames = append(driveNames, teamDrive.Name)
}
driveID := config.Choose("Enter a Shared Drive ID", driveIDs, driveNames, true)
m.Set("team_drive", driveID)
m.Set("root_folder_id", "")
opt.TeamDriveID = driveID
opt.RootFolderID = ""
return nil
}
// getClient makes an http client according to the options
func getClient(ctx context.Context, opt *Options) *http.Client {
t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
@@ -1007,7 +1053,7 @@ func getServiceAccountClient(ctx context.Context, opt *Options, credentialsData
scopes := driveScopes(opt.Scope)
conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
return nil, fmt.Errorf("error processing credentials: %w", err)
}
if opt.Impersonate != "" {
conf.Subject = opt.Impersonate
@@ -1024,19 +1070,19 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
opt.ServiceAccountCredentials = string(loadedCreds)
}
if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(ctx, opt, []byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, errors.Wrap(err, "failed to create oauth client from service account")
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
if err != nil {
return nil, errors.Wrap(err, "failed to create oauth client")
return nil, fmt.Errorf("failed to create oauth client: %w", err)
}
}
@@ -1045,10 +1091,10 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if !isPowerOfTwo(int64(cs)) {
return errors.Errorf("%v isn't a power of two", cs)
return fmt.Errorf("%v isn't a power of two", cs)
}
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
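A quick check of the two constraints against the values in this diff: minChunkSize is now googleapi.MinUploadChunkSize, which matches the 256 KiB (262144 bytes, 2^18) the replaced constant used, and the 8 MiB default is 8388608 bytes (2^23), so the default passes both tests, while a value such as 5 MiB fails the power-of-two test. An illustrative standalone version of the check:

package main

import "fmt"

// isPow2 mirrors the power-of-two test used by checkUploadChunkSize above.
func isPow2(n int64) bool { return n > 0 && n&(n-1) == 0 }

func main() {
	const min = 256 << 10                      // 256 KiB
	fmt.Println(isPow2(8<<20) && 8<<20 >= min) // true:  the 8 MiB default is accepted
	fmt.Println(isPow2(5<<20) && 5<<20 >= min) // false: 5 MiB is not a power of two
}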
@@ -1086,16 +1132,16 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "drive: upload cutoff")
return nil, fmt.Errorf("drive: upload cutoff: %w", err)
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "drive: chunk size")
return nil, fmt.Errorf("drive: chunk size: %w", err)
}
oAuthClient, err := createOAuthClient(ctx, opt, name, m)
if err != nil {
return nil, errors.Wrap(err, "drive: failed when making oauth client")
return nil, fmt.Errorf("drive: failed when making oauth client: %w", err)
}
root, err := parseDrivePath(path)
@@ -1129,13 +1175,13 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
f.client = oAuthClient
f.svc, err = drive.New(f.client)
if err != nil {
return nil, errors.Wrap(err, "couldn't create Drive client")
return nil, fmt.Errorf("couldn't create Drive client: %w", err)
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
if err != nil {
return nil, errors.Wrap(err, "couldn't create Drive v2 client")
return nil, fmt.Errorf("couldn't create Drive v2 client: %w", err)
}
}
@@ -1160,7 +1206,8 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
// otherwise look up the actual root ID
rootID, err := f.getRootID(ctx)
if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
var gerr *googleapi.Error
if errors.As(err, &gerr) && gerr.Code == 404 {
// 404 means that this scope does not have permission to get the
// root so just use "root"
rootID = "root"
@@ -1169,7 +1216,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
}
}
f.rootFolderID = rootID
fs.Debugf(f, "root_folder_id = %q - save this in the config to speed up startup", rootID)
fs.Debugf(f, "'root_folder_id = %s' - save this in the config to speed up startup", rootID)
}
f.dirCache = dircache.New(f.root, f.rootFolderID, f)
@@ -1302,7 +1349,7 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
t := linkTemplate(exportMimeType)
if t == nil {
return nil, errors.Errorf("unsupported link type %s", exportMimeType)
return nil, fmt.Errorf("unsupported link type %s", exportMimeType)
}
xdgIcon := _mimeTypeToXDGLinkIcons[info.MimeType]
if xdgIcon == "" {
@@ -1315,7 +1362,7 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
info.WebViewLink, info.Name, xdgIcon,
})
if err != nil {
return nil, errors.Wrap(err, "executing template failed")
return nil, fmt.Errorf("executing template failed: %w", err)
}
baseObject := f.newBaseObject(remote+extension, info)
@@ -1332,8 +1379,8 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
//
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
// If item has MD5 sum or a length it is a file stored on drive
if info.Md5Checksum != "" || info.Size > 0 {
// If item has MD5 sum it is a file stored on drive
if info.Md5Checksum != "" {
return f.newRegularObject(remote, info), nil
}
@@ -1352,11 +1399,11 @@ func (f *Fs) newObjectWithExportInfo(
// will have been resolved so this will do nothing.
info, err = f.resolveShortcut(ctx, info)
if err != nil {
return nil, errors.Wrap(err, "new object")
return nil, fmt.Errorf("new object: %w", err)
}
switch {
case info.MimeType == driveFolderType:
return nil, fs.ErrorNotAFile
return nil, fs.ErrorIsDir
case info.MimeType == shortcutMimeType:
// We can only get here if f.opt.SkipShortcuts is set
// and not from a listing. This is unlikely.
@@ -1366,8 +1413,8 @@ func (f *Fs) newObjectWithExportInfo(
// Pretend a dangling shortcut is a regular object
// It will error if used, but appear in listings so it can be deleted
return f.newRegularObject(remote, info), nil
case info.Md5Checksum != "" || info.Size > 0:
// If item has MD5 sum or a length it is a file stored on drive
case info.Md5Checksum != "":
// If item has MD5 sum it is a file stored on drive
return f.newRegularObject(remote, info), nil
case f.opt.SkipGdocs:
fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
@@ -1550,6 +1597,15 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
}
}
// If using a link type export and a more specific export
// hasn't been found all docs should be exported
for _, _extension := range f.exportExtensions {
_mimeType := mime.TypeByExtension(_extension)
if isLinkMimeType(_mimeType) {
return _extension, _mimeType, true
}
}
// else return empty
return "", "", isDocument
}
@@ -1560,6 +1616,14 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", "", false)
func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
// If item has MD5 sum it is a file stored on drive
if item.Md5Checksum != "" {
return
}
// Folders can't be documents
if item.MimeType == driveFolderType {
return
}
extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
if extension != "" {
filename = item.Name + extension
@@ -1995,13 +2059,14 @@ func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *dr
}
newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
var gerr *googleapi.Error
if errors.As(err, &gerr) && gerr.Code == 404 {
// 404 means dangling shortcut, so just return the shortcut with the mime type mangled
fs.Logf(nil, "Dangling shortcut %q detected", item.Name)
item.MimeType = shortcutMimeTypeDangling
return item, nil
}
return nil, errors.Wrap(err, "failed to resolve shortcut")
return nil, fmt.Errorf("failed to resolve shortcut: %w", err)
}
// make sure we use the Name, Parents and Trashed from the original item
newItem.Name = item.Name
@@ -2103,10 +2168,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
if exportExt == "" {
return nil, errors.Errorf("No export format found for %q", importMimeType)
return nil, fmt.Errorf("No export format found for %q", importMimeType)
}
if exportExt != srcExt && !f.opt.AllowImportNameChange {
return nil, errors.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
return nil, fmt.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
}
}
}
@@ -2127,7 +2192,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// Don't retry, return a retry error instead
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Create(createInfo).
Media(in, googleapi.ContentType(srcMimeType)).
Media(in, googleapi.ContentType(srcMimeType), googleapi.ChunkSize(0)).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
@@ -2174,7 +2239,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return false
})
if err != nil {
return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
return fmt.Errorf("MergeDirs list failed on %v: %w", srcDir, err)
}
// move them into place
for _, info := range infos {
@@ -2190,14 +2255,14 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return f.shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
return fmt.Errorf("MergeDirs move failed on %q in %v: %w", info.Name, srcDir, err)
}
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.delete(ctx, srcDir.ID(), true)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
return fmt.Errorf("MergeDirs move failed to rmdir %q: %w", srcDir, err)
}
}
return nil
@@ -2260,7 +2325,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return err
}
if found {
return errors.Errorf("directory not empty")
return fmt.Errorf("directory not empty")
}
}
if root != "" {
@@ -2352,9 +2417,16 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
createInfo.Description = ""
}
// get the ID of the thing to copy - this is the shortcut if available
// get the ID of the thing to copy
// copy the contents if CopyShortcutContent
// else copy the shortcut only
id := shortcutID(srcObj.id)
if f.opt.CopyShortcutContent {
id = actualID(srcObj.id)
}
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Copy(id, createInfo).
@@ -2438,7 +2510,7 @@ func (f *Fs) cleanupTeamDrive(ctx context.Context, dir string, directoryID strin
return false
})
if err != nil {
err = errors.Wrap(err, "failed to list directory")
err = fmt.Errorf("failed to list directory: %w", err)
r.Errors++
fs.Errorf(dir, "%v", err)
}
@@ -2482,7 +2554,7 @@ func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
return f.shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "failed to get Shared Drive info")
return fmt.Errorf("failed to get Shared Drive info: %w", err)
}
fs.Debugf(f, "read info from Shared Drive %q", td.Name)
return err
@@ -2505,7 +2577,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get Drive storageQuota")
return nil, fmt.Errorf("failed to get Drive storageQuota: %w", err)
}
q := about.StorageQuota
usage := &fs.Usage{
@@ -2829,7 +2901,7 @@ func (f *Fs) Hashes() hash.Set {
func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64)
if err != nil {
return errors.Wrap(err, "couldn't convert chunk size to int")
return fmt.Errorf("couldn't convert chunk size to int: %w", err)
}
chunkSize := fs.SizeSuffix(chunkSizeInt)
if chunkSize == f.opt.ChunkSize {
@@ -2866,17 +2938,17 @@ func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err err
f.opt.ServiceAccountCredentials = ""
oAuthClient, err := createOAuthClient(ctx, &f.opt, f.name, f.m)
if err != nil {
return errors.Wrap(err, "drive: failed when making oauth client")
return fmt.Errorf("drive: failed when making oauth client: %w", err)
}
f.client = oAuthClient
f.svc, err = drive.New(f.client)
if err != nil {
return errors.Wrap(err, "couldn't create Drive client")
return fmt.Errorf("couldn't create Drive client: %w", err)
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
if err != nil {
return errors.Wrap(err, "couldn't create Drive v2 client")
return fmt.Errorf("couldn't create Drive v2 client: %w", err)
}
}
return nil
@@ -2904,13 +2976,13 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
}
isDir = true
} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
if err != fs.ErrorNotAFile {
return nil, errors.Wrap(err, "can't find source")
if err != fs.ErrorIsDir {
return nil, fmt.Errorf("can't find source: %w", err)
}
// source was a directory
srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false)
if err != nil {
return nil, errors.Wrap(err, "failed to find source dir")
return nil, fmt.Errorf("failed to find source dir: %w", err)
}
isDir = true
} else {
@@ -2924,16 +2996,16 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
if err != fs.ErrorObjectNotFound {
if err == nil {
err = errors.New("existing file")
} else if err == fs.ErrorNotAFile {
} else if err == fs.ErrorIsDir {
err = errors.New("existing directory")
}
return nil, errors.Wrap(err, "not overwriting shortcut target")
return nil, fmt.Errorf("not overwriting shortcut target: %w", err)
}
// Create destination shortcut
createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
if err != nil {
return nil, errors.Wrap(err, "shortcut destination failed")
return nil, fmt.Errorf("shortcut destination failed: %w", err)
}
createInfo.MimeType = shortcutMimeType
createInfo.ShortcutDetails = &drive.FileShortcutDetails{
@@ -2950,7 +3022,7 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
return dstFs.shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "shortcut creation failed")
return nil, fmt.Errorf("shortcut creation failed: %w", err)
}
if isDir {
return nil, nil
@@ -2970,7 +3042,7 @@ func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.Drive, err err
return defaultFs.shouldRetry(ctx, err)
})
if err != nil {
return drives, errors.Wrap(err, "listing Team Drives failed")
return drives, fmt.Errorf("listing Team Drives failed: %w", err)
}
drives = append(drives, teamDrives.Drives...)
if teamDrives.NextPageToken == "" {
@@ -3013,7 +3085,7 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
return f.shouldRetry(ctx, err)
})
if err != nil {
err = errors.Wrap(err, "failed to restore")
err = fmt.Errorf("failed to restore: %w", err)
r.Errors++
fs.Errorf(remote, "%v", err)
} else {
@@ -3030,7 +3102,7 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
return false
})
if err != nil {
err = errors.Wrap(err, "failed to list directory")
err = fmt.Errorf("failed to list directory: %w", err)
r.Errors++
fs.Errorf(dir, "%v", err)
}
@@ -3054,10 +3126,10 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
info, err := f.getFile(ctx, id, f.fileFields)
if err != nil {
return errors.Wrap(err, "couldn't find id")
return fmt.Errorf("couldn't find id: %w", err)
}
if info.MimeType == driveFolderType {
return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
}
info.Name = f.opt.Enc.ToStandardName(info.Name)
o, err := f.newObjectWithInfo(ctx, info.Name, info)
@@ -3080,7 +3152,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
}
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
if err != nil {
return errors.Wrap(err, "copy failed")
return fmt.Errorf("copy failed: %w", err)
}
return nil
}
@@ -3144,7 +3216,7 @@ account.
Usage:
rclone backend drives drive:
rclone backend [-o config] drives drive:
This will return a JSON list of objects like this
@@ -3161,6 +3233,22 @@ This will return a JSON list of objects like this
}
]
With the -o config parameter it will output the list in a format
suitable for adding to a config file to make aliases for all the
drives found.
[My Drive]
type = alias
remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
[Test Drive]
type = alias
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. This may require manual editing
of the names.
`,
}, {
Name: "untrash",
@@ -3263,7 +3351,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if ok {
targetFs, err := cache.Get(ctx, target)
if err != nil {
return nil, errors.Wrap(err, "couldn't find target")
return nil, fmt.Errorf("couldn't find target: %w", err)
}
dstFs, ok = targetFs.(*Fs)
if !ok {
@@ -3272,7 +3360,21 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
case "drives":
return f.listTeamDrives(ctx)
drives, err := f.listTeamDrives(ctx)
if err != nil {
return nil, err
}
if _, ok := opt["config"]; ok {
lines := []string{}
for _, drive := range drives {
lines = append(lines, "")
lines = append(lines, fmt.Sprintf("[%s]", drive.Name))
lines = append(lines, fmt.Sprintf("type = alias"))
lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id))
}
return lines, nil
}
return drives, nil
case "untrash":
dir := ""
if len(arg) > 0 {
@@ -3288,7 +3390,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
arg = arg[2:]
err = f.copyID(ctx, id, dest)
if err != nil {
return nil, errors.Wrapf(err, "failed copying %q to %q", id, dest)
return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
}
}
return nil, nil
@@ -3522,11 +3624,11 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
url += "acknowledgeAbuse=true"
_, res, err = o.httpResponse(ctx, url, "GET", options)
} else {
err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
err = fmt.Errorf("Use the --drive-acknowledge-abuse flag to download this file: %w", err)
}
}
if err != nil {
return nil, errors.Wrap(err, "open file failed")
return nil, fmt.Errorf("open file failed: %w", err)
}
}
return res.Body, nil
@@ -3620,7 +3722,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
Media(in, googleapi.ContentType(uploadMimeType)).
Media(in, googleapi.ContentType(uploadMimeType), googleapi.ChunkSize(0)).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(o.fs.opt.KeepRevisionForever).
@@ -3690,14 +3792,14 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
}
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
return errors.Errorf("can't update google document type without --drive-import-formats")
return fmt.Errorf("can't update google document type without --drive-import-formats")
}
importMimeType = o.fs.findImportFormat(ctx, updateInfo.MimeType)
if importMimeType == "" {
return errors.Errorf("no import format found for %q", srcMimeType)
return fmt.Errorf("no import format found for %q", srcMimeType)
}
if importMimeType != o.documentMimeType {
return errors.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
return fmt.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
}
updateInfo.MimeType = importMimeType


@@ -4,6 +4,8 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
@@ -14,11 +16,12 @@ import (
"testing"
"time"
"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
@@ -419,11 +422,7 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
require.NoError(t, err)
o := obj.(*Object)
dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(dir)
}()
dir := t.TempDir()
checkFile := func(name string) {
filePath := filepath.Join(dir, name)
@@ -461,6 +460,73 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
})
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
opt := &filter.Opt{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)
flt, err := filter.NewFilter(opt)
assert.NoError(t, err)
defCtx := context.Background()
fltCtx := filter.ReplaceConfig(defCtx, flt)
testCtx1 := fltCtx
testCtx2 := filter.SetUseFilter(testCtx1, true)
testCtx3, testCancel := context.WithCancel(testCtx2)
testCtx4 := filter.SetUseFilter(testCtx3, false)
testCancel()
assert.False(t, filter.GetUseFilter(testCtx1))
assert.True(t, filter.GetUseFilter(testCtx2))
assert.True(t, filter.GetUseFilter(testCtx3))
assert.False(t, filter.GetUseFilter(testCtx4))
subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), "agequery-testdir")
subFsResult, err := fs.NewFs(defCtx, subRemote)
require.NoError(t, err)
subFs, isDriveFs := subFsResult.(*Fs)
require.True(t, isDriveFs)
tempDir1 := t.TempDir()
tempFs1, err := fs.NewFs(defCtx, tempDir1)
require.NoError(t, err)
tempDir2 := t.TempDir()
tempFs2, err := fs.NewFs(defCtx, tempDir2)
require.NoError(t, err)
file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
_, _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
// validate sync/copy
const timeQuery = "(modifiedTime >= '"
assert.NoError(t, sync.CopyDir(defCtx, subFs, tempFs1, false))
assert.NotContains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, sync.CopyDir(fltCtx, subFs, tempFs1, false))
assert.Contains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, sync.CopyDir(fltCtx, tempFs2, subFs, false))
assert.Contains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, sync.CopyDir(defCtx, tempFs2, subFs, false))
assert.NotContains(t, subFs.lastQuery, timeQuery)
// validate list/walk
devNull, errOpen := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
require.NoError(t, errOpen)
defer func() {
_ = devNull.Close()
}()
assert.NoError(t, operations.List(defCtx, subFs, devNull))
assert.NotContains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, operations.List(fltCtx, subFs, devNull))
assert.Contains(t, subFs.lastQuery, timeQuery)
}
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
@@ -478,6 +544,7 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
t.Run("AgeQuery", f.InternalTestAgeQuery)
}
var _ fstests.InternalTester = (*Fs)(nil)

backend/dropbox/batcher.go Normal file

@@ -0,0 +1,358 @@
// This file contains the implementation of the sync batcher for uploads
//
// Dropbox rules say you can start as many batches as you want, but
// you may only have one batch being committed and must wait for the
// batch to be finished before committing another.
package dropbox
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)
const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (async)
defaultBatchSizeAsync = 100 // default batch size if async
)
// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}
// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}
// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}
// Send this to get the engine to quit
var quitRequest = batcherRequest{}
// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}
// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil
}
// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}
// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, fmt.Errorf("batch commit failed: %w", err)
}
return batchStatus, nil
}
// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && !signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)
// finalise the batch getting either a result or a job id to poll
batchStatus, err := b.finishBatch(ctx, items)
if err != nil {
return err
}
// check whether batch is complete
var complete *files.UploadSessionFinishBatchResult
switch batchStatus.Tag {
case "async_job_id":
// wait for batch to complete
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
if err != nil {
return err
}
case "complete":
complete = batchStatus.Complete
default:
return fmt.Errorf("batch returned unknown status %q", batchStatus.Tag)
}
// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {
return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}
// Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results {
item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" {
resp.entry = item.Success
} else {
errorCount++
errorTag = item.Tag
if item.Failure != nil {
errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil {
errorTag += "/" + item.Failure.LookupFailed.Tag
}
if item.Failure.Path != nil {
errorTag += "/" + item.Failure.Path.Tag
}
if item.Failure.PropertiesError != nil {
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
}
}
// Show signalled so no need to report error to clients from now on
signalled = true
// Report an error if any failed in the batch
if errorTag != "" {
return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)
return nil
}
// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()
outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}
}
// commit any remaining items
if len(items) > 0 {
commit()
}
}
// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Commiting uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}
// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}
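As a rough illustration of the API above (variable names such as cursor and commitInfo are assumed from the uploader's context, not defined here), a caller hands a finished upload session to the batcher and, in sync mode, blocks until the whole batch has been committed:
// Sketch only: commit one finished upload session through the batcher.
args := &files.UploadSessionFinishArg{
	Cursor: &cursor,    // session cursor after the last chunk was appended
	Commit: commitInfo, // path, mode and client_modified for the final file
}
if f.batcher.Batching() {
	entry, err := f.batcher.Commit(ctx, args)
	if err != nil {
		return err
	}
	// entry is nil when batch_mode is async, because the result is not waited for
	_ = entry
}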

backend/dropbox/dropbox.go Executable file → Normal file

@@ -23,23 +23,22 @@ of path_display and all will be well.
import (
"context"
"errors"
"fmt"
"io"
"log"
"path"
"regexp"
"strings"
"time"
"unicode/utf8"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
"github.com/pkg/errors"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users"
"github.com/rclone/rclone/backend/dropbox/dbhash"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -65,9 +64,9 @@ const (
// Upload chunk size - setting too small makes uploads slow.
// Chunks are buffered into memory for retries.
//
// Speed vs chunk size uploading a 1 GB file on 2017-11-22
// Speed vs chunk size uploading a 1 GiB file on 2017-11-22
//
// Chunk Size MB, Speed Mbyte/s, % of max
// Chunk Size MiB, Speed MiB/s, % of max
// 1 1.364 11%
// 2 2.443 19%
// 4 4.288 33%
@@ -82,11 +81,11 @@ const (
// 96 12.302 95%
// 128 12.945 100%
//
// Choose 48MB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48MB = 192MB
// Choose 48 MiB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48 MiB = 192 MiB
// by default.
defaultChunkSize = 48 * fs.MebiByte
maxChunkSize = 150 * fs.MebiByte
defaultChunkSize = 48 * fs.Mebi
maxChunkSize = 150 * fs.Mebi
// Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
maxFileNameLength = 255
)
@@ -139,32 +138,29 @@ func getOauthConfig(m configmap.Mapper) *oauth2.Config {
// Register with Fs
func init() {
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
DbHashType = hash.RegisterHash("dropbox", "DropboxHash", 64, dbhash.New)
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
opt := oauthutil.Options{
NoOffline: true,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: getOauthConfig(m),
NoOffline: true,
OAuth2Opts: []oauth2.AuthCodeOption{
oauth2.SetAuthURLParam("token_access_type", "offline"),
},
}
err := oauthutil.Config(ctx, "dropbox", name, m, getOauthConfig(m), &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size. (< %v).
Help: fmt.Sprintf(`Upload chunk size (< %v).
Any files larger than this will be uploaded in chunks of this size.
Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
slightly (at most 10%% for 128 MiB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Default: defaultChunkSize,
Advanced: true,
@@ -213,6 +209,68 @@ Note that we don't unmount the shared folder afterwards so the
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: "batch_mode",
Help: `Upload file batching sync|async|off.
This sets the batch mode used by rclone.
For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
This has 3 possible values
- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion
Rclone will close any outstanding batches when it exits, which may cause
a delay on quit.
`,
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: `Max number of files in upload batch.
This sets the batch size of files to upload. It has to be less than 1000.
By default this is 0 which means rclone will calculate the batch size
depending on the setting of batch_mode.
- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use
Rclone will close any outstanding batches when it exits, which may cause
a delay on quit.
Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading.
If an upload batch is idle for more than this long then it will be
uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -232,11 +290,16 @@ shared folder.`,
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"`
Enc encoder.MultiEncoder `config:"encoding"`
}
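For illustration only, a remote using the new batch options might look like this in rclone.conf (the option names match the config tags above; the values are examples, not defaults):
[dropbox]
type = dropbox
batch_mode = async
batch_size = 100
batch_timeout = 10s
batch_commit_timeout = 10m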
// Fs represents a remote dropbox server
@@ -255,6 +318,7 @@ type Fs struct {
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
batcher *batcher // batch builder
}
// Object describes a dropbox object
@@ -270,8 +334,6 @@ type Object struct {
hash string // content_hash of the object
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
@@ -301,36 +363,36 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
if err == nil {
return false, err
}
baseErrString := errors.Cause(err).Error()
errString := err.Error()
// First check for specific errors
if strings.Contains(baseErrString, "insufficient_space") {
if strings.Contains(errString, "insufficient_space") {
return false, fserrors.FatalError(err)
} else if strings.Contains(baseErrString, "malformed_path") {
} else if strings.Contains(errString, "malformed_path") {
return false, fserrors.NoRetryError(err)
}
// Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
}
// Keep old behavior for backward compatibility
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
return true, err
}
return fserrors.ShouldRetry(err), err
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.Byte
const minChunkSize = fs.SizeSuffixBase
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
@@ -353,7 +415,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "dropbox: chunk size")
return nil, fmt.Errorf("dropbox: chunk size: %w", err)
}
// Convert the old token if it exists. The old token was just
@@ -365,13 +427,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
if err != nil {
return nil, errors.Wrap(err, "NewFS convert token")
return nil, fmt.Errorf("NewFS convert token: %w", err)
}
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m))
if err != nil {
return nil, errors.Wrap(err, "failed to configure dropbox")
return nil, fmt.Errorf("failed to configure dropbox: %w", err)
}
ci := fs.GetConfig(ctx)
@@ -382,6 +444,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
return nil, err
}
cfg := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe???
@@ -408,7 +474,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
memberIds, err := f.team.MembersGetInfo(args)
if err != nil {
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
}
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
@@ -485,7 +551,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "get current account failed")
return nil, fmt.Errorf("get current account failed: %w", err)
}
switch x := acc.RootInfo.(type) {
case *common.TeamRootInfo:
@@ -493,28 +559,30 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case *common.UserRootInfo:
f.ns = x.RootNamespaceId
default:
return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
return nil, fmt.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
}
fs.Debugf(f, "Using root namespace %q", f.ns)
}
f.setRoot(root)
// See if the root is actually an object
_, err = f.getFileMetadata(ctx, f.slashRoot)
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
if f.root != "" {
_, err = f.getFileMetadata(ctx, f.slashRoot)
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// headerGenerator for dropbox sdk
func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string {
func (f *Fs) headerGenerator(hostType string, namespace string, route string) map[string]string {
if f.ns == "" {
return map[string]string{}
}
@@ -564,6 +632,9 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *fi
}
fileInfo, ok := entry.(*files.FileMetadata)
if !ok {
if _, ok = entry.(*files.FolderMetadata); ok {
return nil, fs.ErrorIsDir
}
return nil, fs.ErrorNotAFile
}
return fileInfo, nil
@@ -641,7 +712,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -715,7 +786,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -725,7 +796,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
fs: f,
url: entry.PreviewUrl,
remote: entryPath,
modTime: entry.TimeInvited,
modTime: *entry.TimeInvited,
}
if err != nil {
return nil, err
@@ -781,6 +852,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(root),
Recursive: false,
Limit: 1000,
}
if root == "/" {
arg.Path = "" // Specify root folder as empty string
@@ -808,7 +880,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -920,7 +992,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
// check directory exists
_, err = f.getDirMetadata(ctx, root)
if err != nil {
return errors.Wrap(err, "Rmdir")
return fmt.Errorf("Rmdir: %w", err)
}
root = f.opt.Enc.FromStandardPath(root)
@@ -938,7 +1010,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
return fmt.Errorf("Rmdir: %w", err)
}
if len(res.Entries) != 0 {
return errors.New("directory not empty")
@@ -1004,7 +1076,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "copy failed")
return nil, fmt.Errorf("copy failed: %w", err)
}
// Set the metadata
@@ -1014,7 +1086,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err = dstObj.setMetadataFromEntry(fileInfo)
if err != nil {
return nil, errors.Wrap(err, "copy failed")
return nil, fmt.Errorf("copy failed: %w", err)
}
return dstObj, nil
@@ -1065,7 +1137,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "move failed")
return nil, fmt.Errorf("move failed: %w", err)
}
// Set the metadata
@@ -1075,7 +1147,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err = dstObj.setMetadataFromEntry(fileInfo)
if err != nil {
return nil, errors.Wrap(err, "move failed")
return nil, fmt.Errorf("move failed: %w", err)
}
return dstObj, nil
}
@@ -1100,14 +1172,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
}
if expire < fs.DurationOff {
expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
createArg.Settings.Expires = expiryTime
}
// FIXME note we can't set Settings for non enterprise dropbox
// because of https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75
// however this only goes wrong when we set Expires, so as a
// work-around remove Settings unless expire is set.
if expire == fs.DurationOff {
createArg.Settings = nil
createArg.Settings.Expires = &expiryTime
}
var linkRes sharing.IsSharedLinkMetadata
@@ -1190,7 +1255,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "MoveDir failed")
return fmt.Errorf("MoveDir failed: %w", err)
}
return nil
@@ -1204,7 +1269,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
return nil, fmt.Errorf("about failed: %w", err)
}
var total uint64
if q.Allocation != nil {
@@ -1344,7 +1409,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
return shouldRetry(ctx, err)
})
if err != nil {
return "", errors.Wrap(err, "list continue")
return "", fmt.Errorf("list continue: %w", err)
}
cursor = changeList.Cursor
var entryType fs.EntryType
@@ -1381,6 +1446,13 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(DbHashType)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
f.batcher.Shutdown()
return nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -1416,7 +1488,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
}
err := o.readMetaData(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to read hash from metadata")
return "", fmt.Errorf("failed to read hash from metadata: %w", err)
}
return o.hash, nil
}
@@ -1540,97 +1612,83 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// uploadChunked uploads the object in parts
//
// Will work optimally if size is >= uploadChunkSize. If the size is either
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
// Will introduce two additional network requests to start and finish the session.
// If the size is unknown (i.e. -1) the method incurs one additional
// request to the Dropbox API that does not carry a payload to close the append session.
func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
chunkSize := int64(o.fs.opt.ChunkSize)
chunks := 0
if size != -1 {
chunks = int(size/chunkSize) + 1
}
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))
fmtChunk := func(cur int, last bool) {
if chunks == 0 && last {
fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
} else if chunks == 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", cur)
} else {
fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
}
}
// write the first chunk
fmtChunk(1, false)
// start upload
var res *files.UploadSessionStartResult
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, nil)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
chunkSize := int64(o.fs.opt.ChunkSize)
chunks, remainder := size/chunkSize, size%chunkSize
if remainder > 0 {
chunks++
}
// write chunks
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))
cursor := files.UploadSessionCursor{
SessionId: res.SessionId,
Offset: 0,
}
appendArg := files.UploadSessionAppendArg{
Cursor: &cursor,
Close: false,
}
// write more whole chunks (if any)
currentChunk := 2
for {
if chunks > 0 && currentChunk >= chunks {
// if the size is known, only upload full chunks. Remaining bytes are uploaded with
// the UploadSessionFinish request.
break
} else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
// if the size is unknown, upload as long as we can read full chunks from the reader.
// The UploadSessionFinish request will not contain any payload.
break
}
appendArg := files.UploadSessionAppendArg{Cursor: &cursor}
for currentChunk := 1; ; currentChunk++ {
cursor.Offset = in.BytesRead()
fmtChunk(currentChunk, false)
chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
if chunks < 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", currentChunk)
} else {
fs.Debugf(o, "Uploading chunk %d/%d", currentChunk, chunks)
}
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
// after the first chunk is uploaded, we retry everything
// after session is started, we retry everything
return err != nil, err
})
if err != nil {
return nil, err
}
currentChunk++
if appendArg.Close {
break
}
if size > 0 {
// if size is known, check if next chunk is final
appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
} else {
// if size is unknown, upload as long as we can read full chunks from the reader
appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
}
}
// write the remains
// finish upload
cursor.Offset = in.BytesRead()
args := &files.UploadSessionFinishArg{
Cursor: &cursor,
Commit: commitInfo,
}
fmtChunk(currentChunk, true)
chunk = readers.NewRepeatableReaderBuffer(in, buf)
// If we are batching then we should have written all the data now
// store the commit info now for a batch commit
if o.fs.batcher.Batching() {
return o.fs.batcher.Commit(ctx, args)
}
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
entry, err = o.fs.srv.UploadSessionFinish(args, nil)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -1683,12 +1741,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
return fserrors.NoRetryError(fmt.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
}
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision.
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
clientModified := src.ModTime(ctx).UTC().Round(time.Second)
commitInfo.ClientModified = &clientModified
// Don't attempt to create filenames that are too long
if cErr := checkPathLength(commitInfo.Path); cErr != nil {
return cErr
@@ -1697,7 +1756,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
var err error
var entry *files.FileMetadata
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
if size > int64(o.fs.opt.ChunkSize) || size < 0 || o.fs.batcher.Batching() {
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1706,7 +1765,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
})
}
if err != nil {
return errors.Wrap(err, "upload failed")
return fmt.Errorf("upload failed: %w", err)
}
// If we haven't received data back from batch upload then fake it
//
// This will only happen if we are uploading async batches
if entry == nil {
o.bytes = size
o.modTime = *commitInfo.ClientModified
o.hash = "" // we don't have this
return nil
}
return o.setMetadataFromEntry(entry)
}
@@ -1735,6 +1803,7 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = &Fs{}
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
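As a condensed sketch of the reworked chunked upload above (known size only; retries, pacing and the batcher path are omitted, and srv, in, buf, size, chunkSize and commitInfo are assumed from the surrounding method), the session now starts and finishes without a payload and marks the final append with Close:
// Sketch only: simplified upload session flow for a known size.
res, err := srv.UploadSessionStart(&files.UploadSessionStartArg{}, nil) // open session, no data
if err != nil {
	return err
}
cursor := files.UploadSessionCursor{SessionId: res.SessionId}
appendArg := files.UploadSessionAppendArg{Cursor: &cursor}
for {
	cursor.Offset = in.BytesRead()
	chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
	if err := srv.UploadSessionAppendV2(&appendArg, chunk); err != nil { // send one chunk
		return err
	}
	if appendArg.Close { // the chunk just sent was flagged as the last one
		break
	}
	// flag the next append as the last one when at most one chunk of data remains
	appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
}
cursor.Offset = in.BytesRead()
_, err = srv.UploadSessionFinish(&files.UploadSessionFinishArg{Cursor: &cursor, Commit: commitInfo}, nil) // commit, no data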


@@ -2,14 +2,16 @@ package fichier
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
@@ -80,16 +82,22 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read file info")
return nil, fmt.Errorf("couldn't read file info: %w", err)
}
return &file, err
}
// maybe do some actual validation later if necessary
func validToken(token *GetTokenResponse) bool {
return token.Status == "OK"
}
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
Pass: f.opt.FilePassword,
}
opts := rest.Opts{
Method: "POST",
@@ -99,10 +107,11 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
return shouldRetry(ctx, resp, err)
doretry, err := shouldRetry(ctx, resp, err)
return doretry || !validToken(&token), err
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
return nil, fmt.Errorf("couldn't list files: %w", err)
}
return &token, nil
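With the change above, a getDownloadToken response only counts as a success when the HTTP layer is satisfied and the body reports Status "OK"; everything else goes back through the pacer. A self-contained sketch of that behaviour, with the retry budget passed in explicitly (the real code leaves budgeting and sleeping to rclone's pacer):

```go
package main

import "errors"

// tokenResponse stands in for GetTokenResponse from the hunk above.
type tokenResponse struct{ Status string }

// fetchValidToken retries until fetch returns a non-retryable result whose
// body carries Status "OK".
func fetchValidToken(fetch func() (tokenResponse, bool, error), maxTries int) (tokenResponse, error) {
	for try := 0; try < maxTries; try++ {
		tok, retryable, err := fetch()
		if err == nil && !retryable && tok.Status == "OK" {
			return tok, nil
		}
		if err != nil && !retryable {
			return tokenResponse{}, err
		}
	}
	return tokenResponse{}, errors.New("no valid download token after retries")
}
```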
@@ -118,10 +127,16 @@ func fileFromSharedFile(file *SharedFile) File {
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
ContentType: "application/x-www-form-urlencoded",
}
if f.opt.FolderPassword != "" {
opts.Method = "POST"
opts.Parameters = nil
opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword))
}
var sharedFiles SharedFolderResponse
@@ -130,7 +145,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
return nil, fmt.Errorf("couldn't list files: %w", err)
}
entries = make([]fs.DirEntry, len(sharedFiles))
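When a folder password is configured, the listing above switches from a GET with a json=1 query parameter to a POST whose form-encoded body carries the password. A small self-contained sketch of that request construction (the helper and its arguments are illustrative):

```go
package main

import (
	"net/http"
	"net/url"
	"strings"
)

// sharedDirRequest builds a password-protected shared-folder listing request:
// POST https://1fichier.com/dir/<id> with "json=1&pass=<password>" as the body.
func sharedDirRequest(id, folderPassword string) (*http.Request, error) {
	body := "json=1&pass=" + url.QueryEscape(folderPassword)
	req, err := http.NewRequest("POST", "https://1fichier.com/dir/"+id, strings.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	return req, nil
}
```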
@@ -159,7 +174,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
return nil, fmt.Errorf("couldn't list files: %w", err)
}
for i := range filesList.Items {
item := &filesList.Items[i]
@@ -187,7 +202,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
return nil, fmt.Errorf("couldn't list folders: %w", err)
}
foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
for i := range foldersList.SubFolders {
@@ -281,7 +296,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
return nil, fmt.Errorf("couldn't create folder: %w", err)
}
// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
@@ -308,10 +323,10 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
return nil, fmt.Errorf("couldn't remove folder: %w", err)
}
if response.Status != "OK" {
return nil, errors.New("Can't remove non-empty dir")
return nil, fmt.Errorf("can't remove folder: %s", response.Message)
}
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -338,7 +353,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove file")
return nil, fmt.Errorf("couldn't remove file: %w", err)
}
// fs.Debugf(f, "Removed file with url `%s`", url)
@@ -365,7 +380,7 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
})
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
return nil, fmt.Errorf("couldn't copy file: %w", err)
}
return response, nil
@@ -390,7 +405,35 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
})
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
return nil, fmt.Errorf("couldn't copy file: %w", err)
}
return response, nil
}
func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) {
request := &RenameFileRequest{
URLs: []RenameFileURL{
{
URL: url,
Filename: newName,
},
},
}
opts := rest.Opts{
Method: "POST",
Path: "/file/rename.cgi",
}
response = &RenameFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't rename file: %w", err)
}
return response, nil
@@ -411,7 +454,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node")
return nil, fmt.Errorf("didnt got an upload node: %w", err)
}
// fs.Debugf(f, "Got Upload node")
@@ -455,7 +498,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
})
if err != nil {
return nil, errors.Wrap(err, "couldn't upload file")
return nil, fmt.Errorf("couldn't upload file: %w", err)
}
// fs.Debugf(f, "Uploaded File `%s`", fileName)
@@ -489,7 +532,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
})
if err != nil {
return nil, errors.Wrap(err, "couldn't finish file upload")
return nil, fmt.Errorf("couldn't finish file upload: %w", err)
}
return response, err

View File

@@ -2,6 +2,7 @@ package fichier
import (
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -9,7 +10,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -35,17 +35,24 @@ func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
Config: func(ctx context.Context, name string, config configmap.Mapper) {
},
NewFs: NewFs,
NewFs: NewFs,
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key",
}, {
Help: "If you want to download a shared folder, add this parameter",
Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Help: "If you want to download a shared file that is password protected, add this parameter.",
Name: "file_password",
Advanced: true,
IsPassword: true,
}, {
Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
Name: "folder_password",
Advanced: true,
IsPassword: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -77,9 +84,11 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
Enc encoder.MultiEncoder `config:"encoding"`
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs is the interface a cloud storage system must provide
@@ -425,25 +434,45 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}
// Find current directory ID
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
return nil, err
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
// If it is in the correct directory, just rename it
var url string
if currentDirectoryID == directoryID {
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if err != nil {
return nil, fmt.Errorf("couldn't rename file: %w", err)
}
if resp.Status != "OK" {
return nil, fmt.Errorf("couldn't rename file: %s", resp.Message)
}
url = resp.URLs[0].URL
} else {
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, fmt.Errorf("couldn't move file: %w", err)
}
if resp.Status != "OK" {
return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
}
url = resp.URLs[0]
}
file, err := f.readFileInfo(ctx, resp.URLs[0])
file, err := f.readFileInfo(ctx, url)
if err != nil {
return nil, errors.New("couldn't read file data")
}
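The rewritten Move above uses the new rename endpoint when the destination resolves to the folder the file is already in, and falls back to a server-side move otherwise; note that the two responses differ in shape (resp.URLs[0].URL for rename, the plain string resp.URLs[0] for move). A compact sketch of the branch, with the backend calls abstracted away as hypothetical callbacks:

```go
// moveOrRename returns the file's new URL; realRename and realMove stand in
// for the renameFile and moveFile calls in the hunk above.
func moveOrRename(currentDirID, destDirID string, realRename, realMove func() (string, error)) (string, error) {
	if currentDirID == destDirID {
		return realRename() // same folder: only the leaf name changes
	}
	return realMove() // different folder: server-side move
}
```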
@@ -471,10 +500,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
return nil, fmt.Errorf("couldn't move file: %w", err)
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
}
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
@@ -485,6 +514,32 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/user/info.cgi",
ContentType: "application/json",
}
var accountInfo AccountInfo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, nil, &accountInfo)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to read user info: %w", err)
}
// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
Used: fs.NewUsageValue(accountInfo.ColdStorage), // bytes in use
Total: fs.NewUsageValue(accountInfo.AvailableColdStorage), // bytes total
Free: fs.NewUsageValue(accountInfo.AvailableColdStorage - accountInfo.ColdStorage), // bytes free
}
return usage, nil
}
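The new About implementation derives free space from the cold-storage counters returned by /user/info.cgi. A sketch of how such support is consumed through rclone's optional-features mechanism (assuming the standard Features().About hook; backends without it leave the pointer nil):

```go
package main

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// logFreeSpace prints the free space reported by a backend's About call,
// if the backend provides one.
func logFreeSpace(ctx context.Context, f fs.Fs) {
	about := f.Features().About
	if about == nil {
		return // backend has no quota support
	}
	usage, err := about(ctx)
	if err != nil || usage.Free == nil {
		return
	}
	fs.Infof(f, "free space: %d bytes", *usage.Free)
}
```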
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
o, err := f.NewObject(ctx, remote)

View File

@@ -2,11 +2,12 @@ package fichier
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
@@ -122,7 +123,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Delete duplicate after successful upload
err = o.Remove(ctx)
if err != nil {
return errors.Wrap(err, "failed to remove old version")
return fmt.Errorf("failed to remove old version: %w", err)
}
// Replace guts of old object with new one

View File

@@ -19,6 +19,7 @@ type ListFilesRequest struct {
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
Pass string `json:"pass,omitempty"`
}
// RemoveFolderRequest is the request structure of the corresponding request
@@ -63,8 +64,9 @@ type MoveFileRequest struct {
// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
Status string `json:"status"`
URLs []string `json:"urls"`
Status string `json:"status"`
Message string `json:"message"`
URLs []string `json:"urls"`
}
// CopyFileRequest is the request structure of the corresponding request
@@ -76,9 +78,10 @@ type CopyFileRequest struct {
// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
Status string `json:"status"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
Status string `json:"status"`
Message string `json:"message"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
}
// FileCopy is used in the the CopyFileResponse
@@ -87,6 +90,30 @@ type FileCopy struct {
ToURL string `json:"to_url"`
}
// RenameFileURL is the data structure to rename a single file
type RenameFileURL struct {
URL string `json:"url"`
Filename string `json:"filename"`
}
// RenameFileRequest is the request structure of the corresponding request
type RenameFileRequest struct {
URLs []RenameFileURL `json:"urls"`
Pretty int `json:"pretty"`
}
// RenameFileResponse is the response structure of the corresponding request
type RenameFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Renamed int `json:"renamed"`
URLs []struct {
URL string `json:"url"`
OldFilename string `json:"old_filename"`
NewFilename string `json:"new_filename"`
} `json:"urls"`
}
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`
@@ -155,3 +182,34 @@ type FoldersList struct {
Status string `json:"Status"`
SubFolders []Folder `json:"sub_folders"`
}
// AccountInfo is the structure how 1Fichier returns user info
type AccountInfo struct {
StatsDate string `json:"stats_date"`
MailRM string `json:"mail_rm"`
DefaultQuota int64 `json:"default_quota"`
UploadForbidden string `json:"upload_forbidden"`
PageLimit int `json:"page_limit"`
ColdStorage int64 `json:"cold_storage"`
Status string `json:"status"`
UseCDN string `json:"use_cdn"`
AvailableColdStorage int64 `json:"available_cold_storage"`
DefaultPort string `json:"default_port"`
DefaultDomain int `json:"default_domain"`
Email string `json:"email"`
DownloadMenu string `json:"download_menu"`
FTPDID int `json:"ftp_did"`
DefaultPortFiles string `json:"default_port_files"`
FTPReport string `json:"ftp_report"`
OverQuota int64 `json:"overquota"`
AvailableStorage int64 `json:"available_storage"`
CDN string `json:"cdn"`
Offer string `json:"offer"`
SubscriptionEnd string `json:"subscription_end"`
TFA string `json:"2fa"`
AllowedColdStorage int64 `json:"allowed_cold_storage"`
HotStorage int64 `json:"hot_storage"`
DefaultColdStorageQuota int64 `json:"default_cold_storage_quota"`
FTPMode string `json:"ftp_mode"`
RUReport string `json:"ru_report"`
}

View File

@@ -5,6 +5,7 @@ package api
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strings"
@@ -51,11 +52,46 @@ func (t Time) String() string {
return time.Time(t).UTC().Format(timeFormatParameters)
}
// Int represents an integer which can be represented in JSON as a
// quoted integer or an integer.
type Int int
// MarshalJSON turns a Int into JSON
func (i *Int) MarshalJSON() (out []byte, err error) {
return json.Marshal((*int)(i))
}
// UnmarshalJSON turns JSON into a Int
func (i *Int) UnmarshalJSON(data []byte) error {
if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
data = data[1 : len(data)-1]
}
return json.Unmarshal(data, (*int)(i))
}
// String represents an string which can be represented in JSON as a
// quoted string or an integer.
type String string
// MarshalJSON turns a String into JSON
func (s *String) MarshalJSON() (out []byte, err error) {
return json.Marshal((*string)(s))
}
// UnmarshalJSON turns JSON into a String
func (s *String) UnmarshalJSON(data []byte) error {
err := json.Unmarshal(data, (*string)(s))
if err != nil {
*s = String(data)
}
return nil
}
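The File Fabric API sometimes returns these fields as bare numbers and sometimes as quoted strings (taskid and from in particular), which is why the Int and String wrappers above accept both encodings. A self-contained illustration of the Int behaviour:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Int mirrors the wrapper added above: it unmarshals both 42 and "42".
type Int int

func (i *Int) UnmarshalJSON(data []byte) error {
	if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
		data = data[1 : len(data)-1] // strip the quotes, then parse as int
	}
	return json.Unmarshal(data, (*int)(i))
}

func main() {
	var a, b struct{ From Int }
	_ = json.Unmarshal([]byte(`{"from": 42}`), &a)
	_ = json.Unmarshal([]byte(`{"from": "42"}`), &b)
	fmt.Println(a.From, b.From) // 42 42
}
```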
// Status return returned in all status responses
type Status struct {
Code string `json:"status"`
Message string `json:"statusmessage"`
TaskID string `json:"taskid"`
TaskID String `json:"taskid"`
// Warning string `json:"warning"` // obsolete
}
@@ -115,7 +151,7 @@ type GetFolderContentsResponse struct {
Total int `json:"total,string"`
Items []Item `json:"filelist"`
Folder Item `json:"folder"`
From int `json:"from,string"`
From Int `json:"from"`
//Count int `json:"count"`
Pid string `json:"pid"`
RefreshResult Status `json:"refreshresult"`

View File

@@ -17,6 +17,7 @@ import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -32,7 +33,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/random"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/filefabric/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -65,7 +65,7 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "url",
Help: "URL of the Enterprise File Fabric to connect to",
Help: "URL of the Enterprise File Fabric to connect to.",
Required: true,
Examples: []fs.OptionExample{{
Value: "https://storagemadeeasy.com",
@@ -79,14 +79,15 @@ func init() {
}},
}, {
Name: "root_folder_id",
Help: `ID of the root folder
Help: `ID of the root folder.
Leave blank normally.
Fill in to make rclone start with directory of a given ID.
`,
}, {
Name: "permanent_token",
Help: `Permanent Authentication Token
Help: `Permanent Authentication Token.
A Permanent Authentication Token can be created in the Enterprise File
Fabric, on the users Dashboard under Security, there is an entry
@@ -99,7 +100,7 @@ For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`,
}, {
Name: "token",
Help: `Session Token
Help: `Session Token.
This is a session token which rclone caches in the config file. It is
usually valid for 1 hour.
@@ -109,14 +110,14 @@ Don't set this value - rclone will set it automatically.
Advanced: true,
}, {
Name: "token_expiry",
Help: `Token expiry time
Help: `Token expiry time.
Don't set this value - rclone will set it automatically.
`,
Advanced: true,
}, {
Name: "version",
Help: `Version read from the file fabric
Help: `Version read from the file fabric.
Don't set this value - rclone will set it automatically.
`,
@@ -222,13 +223,14 @@ var retryStatusCodes = []struct {
// delete in that folder. Please try again later or use
// another name. (error_background)
code: "error_background",
sleep: 6 * time.Second,
sleep: 1 * time.Second,
},
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError) (bool, error) {
// try should be the number of the tries so far, counting up from 1
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError, try int) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
@@ -244,9 +246,10 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
for _, retryCode := range retryStatusCodes {
if code == retryCode.code {
if retryCode.sleep > 0 {
// make this thread only sleep extra time
fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", retryCode.sleep, retryCode.code)
time.Sleep(retryCode.sleep)
// make this thread only sleep exponentially increasing extra time
sleepTime := retryCode.sleep << (try - 1)
fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", sleepTime, retryCode.code)
time.Sleep(sleepTime)
}
return true, err
}
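The retry above now backs off exponentially: the base sleep attached to a retryable status code is doubled on every attempt by shifting, so the shorter 1s base still reaches the old 6s within a few tries. A minimal sketch of the computation:

```go
package main

import (
	"fmt"
	"time"
)

// backoff mirrors the sleep calculation in shouldRetry; try counts from 1.
func backoff(base time.Duration, try int) time.Duration {
	return base << (try - 1)
}

func main() {
	for try := 1; try <= 4; try++ {
		fmt.Println(backoff(time.Second, try)) // 1s 2s 4s 8s
	}
}
```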
@@ -264,7 +267,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, rootID string, path string
"pid": rootID,
}, &resp, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to check path exists")
return nil, fmt.Errorf("failed to check path exists: %w", err)
}
if resp.Exists != "y" {
return nil, fs.ErrorObjectNotFound
@@ -305,7 +308,7 @@ func (f *Fs) getApplianceInfo(ctx context.Context) error {
"token": "*",
}, &applianceInfo, nil)
if err != nil {
return errors.Wrap(err, "failed to read appliance version")
return fmt.Errorf("failed to read appliance version: %w", err)
}
f.opt.Version = applianceInfo.SoftwareVersionLabel
f.m.Set("version", f.opt.Version)
@@ -346,7 +349,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
"authtoken": f.opt.PermanentToken,
}, &info, nil)
if err != nil {
return "", errors.Wrap(err, "failed to get session token")
return "", fmt.Errorf("failed to get session token: %w", err)
}
refreshed = true
now = now.Add(tokenLifeTime)
@@ -400,11 +403,13 @@ func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKEr
ContentType: "application/x-www-form-urlencoded",
Options: options,
}
try := 0
err = f.pacer.Call(func() (bool, error) {
try++
// Refresh the body each retry
opts.Body = strings.NewReader(data.Encode())
resp, err = f.srv.CallJSON(ctx, &opts, nil, result)
return f.shouldRetry(ctx, resp, err, result)
return f.shouldRetry(ctx, resp, err, result, try)
})
if err != nil {
return resp, err
@@ -557,7 +562,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
"fi_name": f.opt.Enc.FromStandardName(leaf),
}, &info, nil)
if err != nil {
return "", errors.Wrap(err, "failed to create directory")
return "", fmt.Errorf("failed to create directory: %w", err)
}
// fmt.Printf("...Id %q\n", *info.Id)
return info.Item.ID, nil
@@ -590,7 +595,7 @@ OUTER:
var info api.GetFolderContentsResponse
_, err = f.rpc(ctx, "getFolderContents", p, &info, nil)
if err != nil {
return false, errors.Wrap(err, "failed to list directory")
return false, fmt.Errorf("failed to list directory: %w", err)
}
for i := range info.Items {
item := &info.Items[i]
@@ -721,7 +726,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) (err error) {
"completedeletion": "n",
}, &info, nil)
if err != nil {
return errors.Wrap(err, "failed to delete file")
return fmt.Errorf("failed to delete file: %w", err)
}
return nil
}
@@ -758,7 +763,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}, &info, nil)
f.dirCache.FlushDir(dir)
if err != nil {
return errors.Wrap(err, "failed to remove directory")
return fmt.Errorf("failed to remove directory: %w", err)
}
return nil
}
@@ -820,7 +825,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
_, err = f.rpc(ctx, "doCopyFile", p, &info, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to copy file")
return nil, fmt.Errorf("failed to copy file: %w", err)
}
err = dstObj.setMetaData(&info.Item)
if err != nil {
@@ -839,7 +844,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
}
// Wait for the the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID string) (err error) {
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
if taskID == "" || taskID == "0" {
// No task to wait for
return nil
@@ -852,7 +857,7 @@ func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID string) (err erro
"taskid": taskID,
}, &info, nil)
if err != nil {
return errors.Wrapf(err, "failed to wait for task %s to complete", taskID)
return fmt.Errorf("failed to wait for task %s to complete: %w", taskID, err)
}
if len(info.Tasks) == 0 {
// task has finished
@@ -885,7 +890,7 @@ func (f *Fs) renameLeaf(ctx context.Context, isDir bool, id string, newLeaf stri
"fi_name": newLeaf,
}, &info, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to rename leaf")
return nil, fmt.Errorf("failed to rename leaf: %w", err)
}
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
if err != nil {
@@ -929,7 +934,7 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
"dir_id": newDirectoryID,
}, &info, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to move file to new directory")
return nil, fmt.Errorf("failed to move file to new directory: %w", err)
}
item = &info.Item
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
@@ -1032,7 +1037,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
var info api.EmptyResponse
_, err = f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil)
if err != nil {
return errors.Wrap(err, "failed to empty trash")
return fmt.Errorf("failed to empty trash: %w", err)
}
return nil
}
@@ -1089,7 +1094,7 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type != api.ItemTypeFile {
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
return fs.ErrorIsDir
}
o.hasMetaData = true
o.size = info.Size
@@ -1159,7 +1164,7 @@ func (o *Object) modifyFile(ctx context.Context, keyValues [][2]string) error {
"data": data.String(),
}, &info, nil)
if err != nil {
return errors.Wrap(err, "failed to update metadata")
return fmt.Errorf("failed to update metadata: %w", err)
}
return o.setMetaData(&info.Item)
}
@@ -1242,7 +1247,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
_, err = o.fs.rpc(ctx, "doInitUpload", p, &upload, nil)
if err != nil {
return errors.Wrap(err, "failed to initialize upload")
return fmt.Errorf("failed to initialize upload: %w", err)
}
// Cancel the upload if aborted or it fails
@@ -1278,18 +1283,20 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var contentLength = size
opts.ContentLength = &contentLength // NB CallJSON scribbles on this which is naughty
}
try := 0
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
try++
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader)
return o.fs.shouldRetry(ctx, resp, err, nil)
return o.fs.shouldRetry(ctx, resp, err, nil, try)
})
if err != nil {
return errors.Wrap(err, "failed to upload")
return fmt.Errorf("failed to upload: %w", err)
}
if uploader.Success != "y" {
return errors.Errorf("upload failed")
return fmt.Errorf("upload failed")
}
if size > 0 && uploader.FileSize != size {
return errors.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
return fmt.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
}
// Now finalize the file
@@ -1301,7 +1308,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
_, err = o.fs.rpc(ctx, "doCompleteUpload", p, &finalize, nil)
if err != nil {
return errors.Wrap(err, "failed to finalize upload")
return fmt.Errorf("failed to finalize upload: %w", err)
}
finalized = true

View File

@@ -4,6 +4,8 @@ package ftp
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/textproto"
@@ -13,8 +15,7 @@ import (
"sync"
"time"
"github.com/jlaffaye/ftp"
"github.com/pkg/errors"
"github.com/rclone/ftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -48,26 +49,24 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "FTP host to connect to",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + currentUser,
Name: "user",
Help: "FTP username.",
Default: currentUser,
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21)",
Name: "port",
Help: "FTP port number.",
Default: 21,
}, {
Name: "pass",
Help: "FTP password",
Help: "FTP password.",
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: `Use Implicit FTPS (FTP over TLS)
Help: `Use Implicit FTPS (FTP over TLS).
When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
@@ -75,35 +74,41 @@ than port 21. Cannot be used in combination with explicit FTP.`,
Default: false,
}, {
Name: "explicit_tls",
Help: `Use Explicit FTPS (FTP over TLS)
Help: `Use Explicit FTPS (FTP over TLS).
When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
Default: 0,
Advanced: true,
}, {
Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server",
Help: "Do not verify the TLS certificate of the server.",
Default: false,
Advanced: true,
}, {
Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support",
Help: "Disable using EPSV even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "disable_mlsd",
Help: "Disable using MLSD even if server advertises support",
Help: "Disable using MLSD even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "writing_mdtm",
Help: "Use MDTM to set modification time (VsFtpd quirk)",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections
Help: `Max time before closing idle connections.
If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.
@@ -116,17 +121,51 @@ Set to 0 to keep connections indefinitely.
Help: "Maximum time to wait for a response to close.",
Default: fs.Duration(60 * time.Second),
Advanced: true,
}, {
Name: "tls_cache_size",
Help: `Size of TLS session cache for all control and data connections.
TLS cache allows to resume TLS sessions and reuse PSK between connections.
Increase if default size is not enough resulting in TLS resumption errors.
Enabled by default. Use 0 to disable.`,
Default: 32,
Advanced: true,
}, {
Name: "disable_tls13",
Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
Default: false,
Advanced: true,
}, {
Name: "shut_timeout",
Help: "Maximum time to wait for data connection closing status.",
Default: fs.Duration(60 * time.Second),
Advanced: true,
}, {
Name: "ask_password",
Default: false,
Help: `Allow asking for FTP password when needed.
If this is set and no password is supplied then rclone will ask for a password
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// The FTP protocol can't handle trailing spaces (for instance
// pureftpd turns them into _)
//
// proftpd can't handle '*' in file names
// pureftpd can't handle '[', ']' or '*'
// The FTP protocol can't handle trailing spaces
// (for instance, pureftpd turns them into '_')
Default: (encoder.Display |
encoder.EncodeRightSpace),
Examples: []fs.OptionExample{{
Value: "Asterisk,Ctl,Dot,Slash",
Help: "ProFTPd can't handle '*' in file names",
}, {
Value: "BackSlash,Ctl,Del,Dot,RightSpace,Slash,SquareBracket",
Help: "PureFTPd can't handle '[]' or '*' in file names",
}, {
Value: "Ctl,LeftPeriod,Slash",
Help: "VsFTPd can't handle file names starting with dot",
}},
}},
})
}
@@ -139,12 +178,17 @@ type Options struct {
Port string `config:"port"`
TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"`
TLSCacheSize int `config:"tls_cache_size"`
DisableTLS13 bool `config:"disable_tls13"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
WritingMDTM bool `config:"writing_mdtm"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -165,6 +209,9 @@ type Fs struct {
tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime
fLstTime bool // true if the List call returns precise time
}
// Object describes an FTP file
@@ -179,6 +226,7 @@ type FileInfo struct {
Name string
Size uint64
ModTime time.Time
precise bool // true if the time is precise
IsDir bool
}
@@ -290,6 +338,12 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.DisableMLSD {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
}
if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
}
if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
@@ -306,7 +360,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return false, nil
})
if err != nil {
err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
err = fmt.Errorf("failed to make FTP connection to %q: %w", f.dialAddr, err)
}
return c, err
}
@@ -353,8 +407,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
_, isRegularError := errors.Cause(err).(*textproto.Error)
if !isRegularError {
var tpErr *textproto.Error
if !errors.As(err, &tpErr) {
nopErr := c.NoOp()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -400,9 +454,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if err != nil {
return nil, err
}
pass, err := obscure.Reveal(opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "NewFS decrypt password")
pass := ""
if opt.AskPassword && opt.Pass == "" {
pass = config.GetPassword("FTP server password")
} else {
pass, err = obscure.Reveal(opt.Pass)
if err != nil {
return nil, fmt.Errorf("NewFS decrypt password: %w", err)
}
}
user := opt.User
if user == "" {
@@ -427,6 +486,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
if opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
}
if opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
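The new tls_cache_size and disable_tls13 options feed straight into the crypto/tls client configuration: an LRU session cache lets the many FTP data connections resume TLS sessions (and reuse TLS 1.3 PSKs), and capping MaxVersion works around servers with broken TLS 1.3. A self-contained sketch of the resulting config (host and sizes are illustrative):

```go
package main

import "crypto/tls"

// ftpsClientTLS builds the client TLS settings corresponding to the two new
// options above.
func ftpsClientTLS(host string, cacheSize int, disableTLS13 bool) *tls.Config {
	conf := &tls.Config{ServerName: host}
	if cacheSize > 0 {
		conf.ClientSessionCache = tls.NewLRUClientSessionCache(cacheSize)
	}
	if disableTLS13 {
		conf.MaxVersion = tls.VersionTLS12
	}
	return conf
}
```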
@@ -453,7 +518,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
// Make a connection and pool it to return errors early
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "NewFs")
return nil, fmt.Errorf("NewFs: %w", err)
}
f.fGetTime = c.IsGetTimeSupported()
f.fSetTime = c.IsSetTimeSupported()
f.fLstTime = c.IsTimePreciseInList()
if !f.fLstTime && f.fGetTime {
f.features.SlowModTime = true
}
f.putFtpConnection(&c, nil)
if root != "" {
@@ -465,7 +536,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
}
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return old f
f.root = root
return f, nil
@@ -544,7 +615,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "findItem")
return nil, fmt.Errorf("findItem: %w", err)
}
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
@@ -573,13 +644,12 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
fs: f,
remote: remote,
}
info := &FileInfo{
o.info = &FileInfo{
Name: remote,
Size: entry.Size,
ModTime: entry.Time,
precise: f.fLstTime,
}
o.info = info
return o, nil
}
return nil, fs.ErrorObjectNotFound
@@ -589,7 +659,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, remote)
if err != nil {
return false, errors.Wrap(err, "dirExists")
return false, fmt.Errorf("dirExists: %w", err)
}
if entry != nil && entry.Type == ftp.EntryTypeFolder {
return true, nil
@@ -610,7 +680,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "list")
return nil, fmt.Errorf("list: %w", err)
}
var listErr error
@@ -648,7 +718,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if len(files) == 0 {
exists, err := f.dirExists(ctx, dir)
if err != nil {
return nil, errors.Wrap(err, "list")
return nil, fmt.Errorf("list: %w", err)
}
if !exists {
return nil, fs.ErrorDirNotFound
@@ -674,6 +744,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
Name: newremote,
Size: object.Size,
ModTime: object.Time,
precise: f.fLstTime,
}
o.info = info
entries = append(entries, o)
@@ -687,8 +758,18 @@ func (f *Fs) Hashes() hash.Set {
return 0
}
// Precision shows Modified Time not supported
// Precision shows whether modified time is supported or not depending on the
// FTP server capabilities, namely whether FTP server:
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) Precision() time.Duration {
if (f.fGetTime || f.fLstTime) && f.fSetTime {
return time.Second
}
return fs.ModTimeNotSupported
}
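Precision now reports one-second accuracy only when the server can both read a precise time (MDTM, or precise MLSD listings) and write one (MFMT, or vsftpd's writable two-argument MDTM). The capability matrix, restated as a tiny sketch where a zero result stands in for fs.ModTimeNotSupported:

```go
package main

import "time"

// modTimePrecision restates the logic documented above using plain booleans
// for the three detected capabilities.
func modTimePrecision(fGetTime, fLstTime, fSetTime bool) time.Duration {
	if (fGetTime || fLstTime) && fSetTime {
		return time.Second
	}
	return 0 // stands in for fs.ModTimeNotSupported
}
```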
@@ -701,7 +782,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(ctx, src.Remote())
if err != nil {
return nil, errors.Wrap(err, "Put mkParentDir failed")
return nil, fmt.Errorf("Put mkParentDir failed: %w", err)
}
o := &Object{
fs: f,
@@ -724,7 +805,7 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "getInfo")
return nil, fmt.Errorf("getInfo: %w", err)
}
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
@@ -740,6 +821,7 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
@@ -761,7 +843,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound {
return errors.Wrapf(err, "mkdir %q failed", abspath)
return fmt.Errorf("mkdir %q failed: %w", abspath, err)
}
parent := path.Dir(abspath)
err = f.mkdir(ctx, parent)
@@ -770,7 +852,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
c, connErr := f.getFtpConnection(ctx)
if connErr != nil {
return errors.Wrap(connErr, "mkdir")
return fmt.Errorf("mkdir: %w", connErr)
}
err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
@@ -806,7 +888,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
c, err := f.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
return fmt.Errorf("Rmdir: %w", translateErrorFile(err))
}
err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err)
@@ -822,11 +904,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err := f.mkParentDir(ctx, remote)
if err != nil {
return nil, errors.Wrap(err, "Move mkParentDir failed")
return nil, fmt.Errorf("Move mkParentDir failed: %w", err)
}
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "Move")
return nil, fmt.Errorf("Move: %w", err)
}
err = c.Rename(
f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
@@ -834,11 +916,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
)
f.putFtpConnection(&c, err)
if err != nil {
return nil, errors.Wrap(err, "Move Rename failed")
return nil, fmt.Errorf("Move Rename failed: %w", err)
}
dstObj, err := f.NewObject(ctx, remote)
if err != nil {
return nil, errors.Wrap(err, "Move NewObject failed")
return nil, fmt.Errorf("Move NewObject failed: %w", err)
}
return dstObj, nil
}
@@ -868,19 +950,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound {
return errors.Wrapf(err, "DirMove getInfo failed")
return fmt.Errorf("DirMove getInfo failed: %w", err)
}
// Make sure the parent directory exists
err = f.mkdir(ctx, path.Dir(dstPath))
if err != nil {
return errors.Wrap(err, "DirMove mkParentDir dst failed")
return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
}
// Do the move
c, err := f.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "DirMove")
return fmt.Errorf("DirMove: %w", err)
}
err = c.Rename(
f.dirFromStandardPath(srcPath),
@@ -888,7 +970,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
)
f.putFtpConnection(&c, err)
if err != nil {
return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
return fmt.Errorf("DirMove Rename(%q,%q) failed: %w", srcPath, dstPath, err)
}
return nil
}
@@ -925,12 +1007,41 @@ func (o *Object) Size() int64 {
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
if !o.info.precise && o.fs.fGetTime {
c, err := o.fs.getFtpConnection(ctx)
if err == nil {
path := path.Join(o.fs.root, o.remote)
path = o.fs.opt.Enc.FromStandardPath(path)
modTime, err := c.GetTime(path)
if err == nil && o.info != nil {
o.info.ModTime = modTime
o.info.precise = true
}
o.fs.putFtpConnection(&c, err)
}
}
return o.info.ModTime
}
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return nil
if !o.fs.fSetTime {
fs.Errorf(o.fs, "SetModTime is not supported")
return nil
}
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return err
}
path := path.Join(o.fs.root, o.remote)
path = o.fs.opt.Enc.FromStandardPath(path)
err = c.SetTime(path, modTime.In(time.UTC))
if err == nil && o.info != nil {
o.info.ModTime = modTime
o.info.precise = true
}
o.fs.putFtpConnection(&c, err)
return err
}
// Storable returns a boolean as to whether this object is storable
@@ -963,7 +1074,11 @@ func (f *ftpReadCloser) Close() error {
errchan <- f.rc.Close()
}()
// Wait for Close for up to 60 seconds by default
timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout))
closeTimeout := f.f.opt.CloseTimeout
if closeTimeout == 0 {
closeTimeout = fs.DurationOff
}
timer := time.NewTimer(time.Duration(closeTimeout))
select {
case err = <-errchan:
timer.Stop()
@@ -1012,12 +1127,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "open")
return nil, fmt.Errorf("open: %w", err)
}
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open")
return nil, fmt.Errorf("open: %w", err)
}
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil
@@ -1047,19 +1162,33 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Update")
return fmt.Errorf("Update: %w", err)
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - send by some servers
if err != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
}
}
if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors
remove()
// recycle connection in advance to let remove() find free token
o.fs.putFtpConnection(nil, err)
return errors.Wrap(err, "update stor")
remove()
return fmt.Errorf("update stor: %w", err)
}
o.fs.putFtpConnection(&c, nil)
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
return fmt.Errorf("SetModTime: %w", err)
}
o.info, err = o.fs.getInfo(ctx, path)
if err != nil {
return errors.Wrap(err, "update getinfo")
return fmt.Errorf("update getinfo: %w", err)
}
return nil
}
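Two behaviours in the Update hunk above are easy to miss: a reply of 250 to STOR (ftp.StatusRequestedFileActionOK, sent by some servers instead of 226) is treated as success, and on a real failure the connection is recycled before remove() so that a pool token is free. A self-contained sketch of the 250 workaround:

```go
package main

import (
	"errors"
	"net/textproto"
)

// ignore250 converts the "250 Requested file action okay" reply that a few
// FTP servers send after a successful STOR from an error into a success.
func ignore250(err error) error {
	var tpErr *textproto.Error
	if errors.As(err, &tpErr) && tpErr.Code == 250 {
		return nil
	}
	return err
}
```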
@@ -1078,7 +1207,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
} else {
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Remove")
return fmt.Errorf("Remove: %w", err)
}
err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
o.fs.putFtpConnection(&c, err)

View File

@@ -0,0 +1,115 @@
package ftp
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type settings map[string]interface{}
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
configMap := configmap.Simple{}
for key, val := range opts {
configMap[key] = fmt.Sprintf("%v", val)
}
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), f.Root())
fixFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)
return fixFs
}
// test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) {
const (
fileSize = 100000000 // 100 MiB
idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
)
if testing.Short() {
t.Skip("not running with -short")
}
ctx := context.Background()
ci := fs.GetConfig(ctx)
saveLowLevelRetries := ci.LowLevelRetries
saveTimeout := ci.Timeout
defer func() {
ci.LowLevelRetries = saveLowLevelRetries
ci.Timeout = saveTimeout
}()
ci.LowLevelRetries = 1
ci.Timeout = idleTimeout
upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
fixFs := deriveFs(ctx, t, f, settings{
"concurrency": concurrency,
"shut_timeout": shutTimeout,
})
// Make test object
fileTime := fstest.Time("2020-03-08T09:30:00.000000000Z")
meta := object.NewStaticObjectInfo("upload-timeout.test", fileTime, int64(fileSize), true, nil, nil)
data := readers.NewPatternReader(int64(fileSize))
// Run upload and ensure maximum time
done := make(chan bool)
deadline := time.After(maxTime)
go func() {
obj, err = fixFs.Put(ctx, data, meta)
done <- true
}()
select {
case <-done:
case <-deadline:
t.Fatalf("Upload got stuck for %v !", maxTime)
}
return obj, err
}
// non-zero shut_timeout should fix i/o errors
obj, err := upload(f.opt.Concurrency, time.Second)
assert.NoError(t, err)
assert.NotNil(t, obj)
if obj != nil {
_ = obj.Remove(ctx)
}
}
// rclone must support precise time with ProFtpd and PureFtpd out of the box.
// The VsFtpd server does not support the MFMT command to set file time like
// other servers but by default supports the MDTM command in the non-standard
// two-argument form for the same purpose.
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) testTimePrecision(t *testing.T) {
name := f.Name()
if pos := strings.Index(name, "{"); pos != -1 {
name = name[:pos]
}
switch name {
case "TestFTPProftpd", "TestFTPPureftpd", "TestFTPVsftpd":
assert.LessOrEqual(t, f.Precision(), time.Second)
}
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("UploadTimeout", f.testUploadTimeout)
t.Run("TimePrecision", f.testTimePrecision)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -9,25 +9,27 @@ import (
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
// TestIntegration runs integration tests against rclone FTP server
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPProftpd:",
NilObject: (*ftp.Object)(nil),
})
}
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPRclone:",
NilObject: (*ftp.Object)(nil),
})
}
func TestIntegration3(t *testing.T) {
// TestIntegrationProftpd runs integration tests against proFTPd
func TestIntegrationProftpd(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPProftpd:",
NilObject: (*ftp.Object)(nil),
})
}
// TestIntegrationPureftpd runs integration tests against pureFTPd
func TestIntegrationPureftpd(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
@@ -37,12 +39,13 @@ func TestIntegration3(t *testing.T) {
})
}
// func TestIntegration4(t *testing.T) {
// if *fstest.RemoteName != "" {
// t.Skip("skipping as -remote is set")
// }
// fstests.Run(t, &fstests.Opt{
// RemoteName: "TestFTPVsftpd:",
// NilObject: (*ftp.Object)(nil),
// })
// }
// TestIntegrationVsftpd runs integration tests against vsFTPd
func TestIntegrationVsftpd(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPVsftpd:",
NilObject: (*ftp.Object)(nil),
})
}

View File

@@ -16,16 +16,16 @@ import (
"context"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"path"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -51,10 +51,10 @@ import (
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 1000 // chunk size to read directory listings
timeFormat = time.RFC3339Nano
metaMtime = "mtime" // key to store mtime in metadata
metaMtimeGsutil = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata
listChunks = 1000 // chunk size to read directory listings
minSleep = 10 * time.Millisecond
)
@@ -65,7 +65,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
RedirectURL: oauthutil.RedirectURL,
}
)
@@ -76,72 +76,71 @@ func init() {
Prefix: "gcs",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
return
}
err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return nil, nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: storageConfig,
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
Default: false,
}, {
Name: "object_acl",
Help: "Access Control List for new objects.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.",
Help: "Object owner gets OWNER access.\nAll Authenticated Users get READER access.",
}, {
Value: "bucketOwnerFullControl",
Help: "Object owner gets OWNER access, and project team owners get OWNER access.",
Help: "Object owner gets OWNER access.\nProject team owners get OWNER access.",
}, {
Value: "bucketOwnerRead",
Help: "Object owner gets OWNER access, and project team owners get READER access.",
Help: "Object owner gets OWNER access.\nProject team owners get READER access.",
}, {
Value: "private",
Help: "Object owner gets OWNER access [default if left blank].",
Help: "Object owner gets OWNER access.\nDefault if left blank.",
}, {
Value: "projectPrivate",
Help: "Object owner gets OWNER access, and project team members get access according to their roles.",
Help: "Object owner gets OWNER access.\nProject team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Object owner gets OWNER access, and all Users get READER access.",
Help: "Object owner gets OWNER access.\nAll Users get READER access.",
}},
}, {
Name: "bucket_acl",
Help: "Access Control List for new buckets.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.",
Help: "Project team owners get OWNER access.\nAll Authenticated Users get READER access.",
}, {
Value: "private",
Help: "Project team owners get OWNER access [default if left blank].",
Help: "Project team owners get OWNER access.\nDefault if left blank.",
}, {
Value: "projectPrivate",
Help: "Project team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Project team owners get OWNER access, and all Users get READER access.",
Help: "Project team owners get OWNER access.\nAll Users get READER access.",
}, {
Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
Help: "Project team owners get OWNER access.\nAll Users get WRITER access.",
}},
}, {
Name: "bucket_policy_only",
@@ -164,64 +163,112 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Help: "Location for the newly created buckets.",
Examples: []fs.OptionExample{{
Value: "",
Help: "Empty for default location (US).",
Help: "Empty for default location (US)",
}, {
Value: "asia",
Help: "Multi-regional location for Asia.",
Help: "Multi-regional location for Asia",
}, {
Value: "eu",
Help: "Multi-regional location for Europe.",
Help: "Multi-regional location for Europe",
}, {
Value: "us",
Help: "Multi-regional location for United States.",
Help: "Multi-regional location for United States",
}, {
Value: "asia-east1",
Help: "Taiwan.",
Help: "Taiwan",
}, {
Value: "asia-east2",
Help: "Hong Kong.",
Help: "Hong Kong",
}, {
Value: "asia-northeast1",
Help: "Tokyo.",
Help: "Tokyo",
}, {
Value: "asia-northeast2",
Help: "Osaka",
}, {
Value: "asia-northeast3",
Help: "Seoul",
}, {
Value: "asia-south1",
Help: "Mumbai.",
Help: "Mumbai",
}, {
Value: "asia-south2",
Help: "Delhi",
}, {
Value: "asia-southeast1",
Help: "Singapore.",
Help: "Singapore",
}, {
Value: "asia-southeast2",
Help: "Jakarta",
}, {
Value: "australia-southeast1",
Help: "Sydney.",
Help: "Sydney",
}, {
Value: "australia-southeast2",
Help: "Melbourne",
}, {
Value: "europe-north1",
Help: "Finland.",
Help: "Finland",
}, {
Value: "europe-west1",
Help: "Belgium.",
Help: "Belgium",
}, {
Value: "europe-west2",
Help: "London.",
Help: "London",
}, {
Value: "europe-west3",
Help: "Frankfurt.",
Help: "Frankfurt",
}, {
Value: "europe-west4",
Help: "Netherlands.",
Help: "Netherlands",
}, {
Value: "europe-west6",
Help: "Zürich",
}, {
Value: "europe-central2",
Help: "Warsaw",
}, {
Value: "us-central1",
Help: "Iowa.",
Help: "Iowa",
}, {
Value: "us-east1",
Help: "South Carolina.",
Help: "South Carolina",
}, {
Value: "us-east4",
Help: "Northern Virginia.",
Help: "Northern Virginia",
}, {
Value: "us-west1",
Help: "Oregon.",
Help: "Oregon",
}, {
Value: "us-west2",
Help: "California.",
Help: "California",
}, {
Value: "us-west3",
Help: "Salt Lake City",
}, {
Value: "us-west4",
Help: "Las Vegas",
}, {
Value: "northamerica-northeast1",
Help: "Montréal",
}, {
Value: "northamerica-northeast2",
Help: "Toronto",
}, {
Value: "southamerica-east1",
Help: "São Paulo",
}, {
Value: "southamerica-west1",
Help: "Santiago",
}, {
Value: "asia1",
Help: "Dual region: asia-northeast1 and asia-northeast2.",
}, {
Value: "eur4",
Help: "Dual region: europe-north1 and europe-west4.",
}, {
Value: "nam4",
Help: "Dual region: us-central1 and us-east1.",
}},
}, {
Name: "storage_class",
@@ -376,7 +423,7 @@ func (o *Object) split() (bucket, bucketPath string) {
func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
return nil, fmt.Errorf("error processing credentials: %w", err)
}
ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
@@ -409,7 +456,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
opt.ServiceAccountCredentials = string(loadedCreds)
}
@@ -418,7 +465,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} else if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
@@ -426,7 +473,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ctx := context.Background()
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
}
}
@@ -450,7 +497,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.client = oAuthClient
f.svc, err = storage.New(f.client)
if err != nil {
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
}
if f.rootBucket != "" && f.rootDirectory != "" {
@@ -760,10 +807,10 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
return fmt.Errorf("failed to get bucket: %w", err)
}
} else {
return errors.Wrap(err, "failed to get bucket")
return fmt.Errorf("failed to get bucket: %w", err)
}
if f.opt.ProjectNumber == "" {
@@ -922,7 +969,7 @@ func (o *Object) setMetaData(info *storage.Object) {
// read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime]
if ok {
modTime, err := time.Parse(timeFormatIn, mtimeString)
modTime, err := time.Parse(timeFormat, mtimeString)
if err == nil {
o.modTime = modTime
return
@@ -930,8 +977,19 @@ func (o *Object) setMetaData(info *storage.Object) {
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
}
// Fallback to GSUtil mtime
mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
if ok {
unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
if err == nil {
o.modTime = time.Unix(unixTimeSec, 0)
return
}
fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
}
// Fallback to the Updated time
modTime, err := time.Parse(timeFormatIn, info.Updated)
modTime, err := time.Parse(timeFormat, info.Updated)
if err != nil {
fs.Logf(o, "Bad time decode: %v", err)
} else {
@@ -988,7 +1046,8 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1)
metadata[metaMtime] = modTime.Format(timeFormatOut)
metadata[metaMtime] = modTime.Format(timeFormat)
metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
return metadata
}
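As an aside, the dual time encoding written above (an RFC3339-style string plus gsutil-compatible Unix seconds) can be sketched in isolation. This is a minimal illustration, assuming time.RFC3339Nano as a stand-in for the backend's timeFormat constant and "goog-reserved-file-mtime" as the gsutil metadata key; both names are assumptions here, not taken from this diff.

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	mod := time.Date(2022, 3, 18, 12, 0, 0, 0, time.UTC)

	// Write both encodings, mirroring metadataFromModTime above.
	meta := map[string]string{
		"mtime":                    mod.Format(time.RFC3339Nano),      // assumed stand-in for timeFormat
		"goog-reserved-file-mtime": strconv.FormatInt(mod.Unix(), 10), // Unix seconds for gsutil compatibility
	}

	// Read back with the same fallback order as setMetaData: string mtime first, then Unix seconds.
	if t, err := time.Parse(time.RFC3339Nano, meta["mtime"]); err == nil {
		fmt.Println("from mtime:", t)
	} else if sec, err2 := strconv.ParseInt(meta["goog-reserved-file-mtime"], 10, 64); err2 == nil {
		fmt.Println("from gsutil mtime:", time.Unix(sec, 0))
	}
}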
@@ -1000,11 +1059,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
return err
}
// Add the mtime to the existing metadata
mtime := modTime.Format(timeFormatOut)
if object.Metadata == nil {
object.Metadata = make(map[string]string, 1)
}
object.Metadata[metaMtime] = mtime
object.Metadata[metaMtime] = modTime.Format(timeFormat)
object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
// Copy the object to itself to update the metadata
// Using PATCH requires too many permissions
bucket, bucketPath := o.split()
@@ -1054,7 +1113,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
_, isRanging := req.Header["Range"]
if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
_ = res.Body.Close() // ignore error
return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
return nil, fmt.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
}
return res.Body, nil
}


@@ -6,9 +6,9 @@ package googlephotos
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
golog "log"
"net/http"
"net/url"
"path"
@@ -18,9 +18,9 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -29,6 +29,7 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
@@ -54,6 +55,7 @@ const (
minSleep = 10 * time.Millisecond
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
scopeAccess = 2 // position of access scope in list
)
var (
@@ -62,12 +64,12 @@ var (
Scopes: []string{
"openid",
"profile",
scopeReadWrite,
scopeReadWrite, // this must be at position scopeAccess
},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
RedirectURL: oauthutil.RedirectURL,
}
)
@@ -78,36 +80,36 @@ func init() {
Prefix: "gphotos",
Description: "Google Photos",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
return
return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
}
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes[0] = scopeReadOnly
} else {
oauthConfig.Scopes[0] = scopeReadWrite
switch config.State {
case "":
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes[scopeAccess] = scopeReadOnly
} else {
oauthConfig.Scopes[scopeAccess] = scopeReadWrite
}
return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
case "warning":
// Warn the user as required by google photos integration
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)
case "warning_done":
return nil, nil
}
// Do the oauth
err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
if err != nil {
golog.Fatalf("Failed to configure token: %v", err)
}
// Warn the user
fmt.Print(`
*** IMPORTANT: All media items uploaded to Google Photos with rclone
*** are stored in full resolution at original quality. These uploads
*** will count towards storage in your Google Account.
`)
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "read_only",
@@ -130,14 +132,14 @@ you want to read the media.`,
}, {
Name: "start_year",
Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year.`,
Advanced: true,
}, {
Name: "include_archived",
Default: false,
Help: `Also view and download archived media.
By default rclone does not request archived media. Thus, when syncing,
By default, rclone does not request archived media. Thus, when syncing,
archived media is not visible in directory listings or transferred.
Note that media in albums is always visible and synced, no matter
@@ -149,16 +151,24 @@ listings and transferred.
Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}}...),
})
}
// Options defines the configuration for this backend
type Options struct {
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"`
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote storage server
@@ -282,7 +292,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
baseClient := fshttp.NewClient(ctx)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
return nil, fmt.Errorf("failed to configure Box: %w", err)
}
root = strings.Trim(path.Clean(root), "/")
@@ -335,13 +345,13 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't read openID config")
return "", fmt.Errorf("couldn't read openID config: %w", err)
}
// Find userinfo endpoint
endpoint, ok := openIDconfig[name].(string)
if !ok {
return "", errors.Errorf("couldn't find %q from openID config", name)
return "", fmt.Errorf("couldn't find %q from openID config", name)
}
return endpoint, nil
@@ -364,7 +374,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read user info")
return nil, fmt.Errorf("couldn't read user info: %w", err)
}
return userInfo, nil
}
@@ -395,7 +405,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't revoke token")
return fmt.Errorf("couldn't revoke token: %w", err)
}
fs.Infof(f, "res = %+v", res)
return nil
@@ -482,7 +492,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list albums")
return nil, fmt.Errorf("couldn't list albums: %w", err)
}
newAlbums := result.Albums
if shared {
@@ -496,7 +506,9 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
lastID = newAlbums[len(newAlbums)-1].ID
}
for i := range newAlbums {
all.add(&newAlbums[i])
anAlbum := newAlbums[i]
anAlbum.Title = f.opt.Enc.FromStandardPath(anAlbum.Title)
all.add(&anAlbum)
}
if result.NextPageToken == "" {
break
@@ -537,7 +549,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list files")
return fmt.Errorf("couldn't list files: %w", err)
}
items := result.MediaItems
if len(items) > 0 && items[0].ID == lastID {
@@ -681,7 +693,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create album")
return nil, fmt.Errorf("couldn't create album: %w", err)
}
f.albums[false].add(&result)
return &result, nil
@@ -867,7 +879,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't get media item")
return fmt.Errorf("couldn't get media item: %w", err)
}
o.setMetaData(&item)
return nil
@@ -1002,7 +1014,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't upload file")
return fmt.Errorf("couldn't upload file: %w", err)
}
uploadToken := strings.TrimSpace(string(token))
if uploadToken == "" {
@@ -1030,14 +1042,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create media item")
return fmt.Errorf("failed to create media item: %w", err)
}
if len(result.NewMediaItemResults) != 1 {
return errors.New("bad response to BatchCreate wrong number of items")
}
mediaItemResult := result.NewMediaItemResults[0]
if mediaItemResult.Status.Code != 0 {
return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
}
o.setMetaData(&mediaItemResult.MediaItem)
@@ -1059,7 +1071,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
albumTitle, fileName := match[1], match[2]
album, ok := o.fs.albums[false].get(albumTitle)
if !ok {
return errors.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
return fmt.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
}
opts := rest.Opts{
Method: "POST",
@@ -1075,7 +1087,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't delete item from album")
return fmt.Errorf("couldn't delete item from album: %w", err)
}
return nil
}


@@ -11,7 +11,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
)
@@ -270,7 +269,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
year := match[1]
current, err := time.Parse("2006", year)
if err != nil {
return nil, errors.Errorf("bad year %q", match[1])
return nil, fmt.Errorf("bad year %q", match[1])
}
currentYear := current.Year()
for current.Year() == currentYear {
@@ -284,7 +283,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
year, err := strconv.Atoi(match[1])
if err != nil || year < 1000 || year > 3000 {
return sf, errors.Errorf("bad year %q", match[1])
return sf, fmt.Errorf("bad year %q", match[1])
}
sf = api.SearchFilter{
Filters: &api.Filters{
@@ -300,14 +299,14 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
if len(match) >= 3 {
month, err := strconv.Atoi(match[2])
if err != nil || month < 1 || month > 12 {
return sf, errors.Errorf("bad month %q", match[2])
return sf, fmt.Errorf("bad month %q", match[2])
}
sf.Filters.DateFilter.Dates[0].Month = month
}
if len(match) >= 4 {
day, err := strconv.Atoi(match[3])
if err != nil || day < 1 || day > 31 {
return sf, errors.Errorf("bad day %q", match[3])
return sf, fmt.Errorf("bad day %q", match[3])
}
sf.Filters.DateFilter.Dates[0].Day = day
}

backend/hasher/commands.go (new file)

@@ -0,0 +1,180 @@
package hasher
import (
"context"
"errors"
"fmt"
"path"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/kv"
)
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "drop":
return nil, f.db.Stop(true)
case "dump", "fulldump":
return nil, f.dbDump(ctx, name == "fulldump", "")
case "import", "stickyimport":
sticky := name == "stickyimport"
if len(arg) != 2 {
return nil, errors.New("please provide checksum type and path to sum file")
}
return nil, f.dbImport(ctx, arg[0], arg[1], sticky)
default:
return nil, fs.ErrorCommandNotFound
}
}
var commandHelp = []fs.CommandHelp{{
Name: "drop",
Short: "Drop cache",
Long: `Completely drop checksum cache.
Usage Example:
rclone backend drop hasher:
`,
}, {
Name: "dump",
Short: "Dump the database",
Long: "Dump cache records covered by the current remote",
}, {
Name: "fulldump",
Short: "Full dump of the database",
Long: "Dump all cache records in the database",
}, {
Name: "import",
Short: "Import a SUM file",
Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
Usage Example:
rclone backend import hasher:subdir md5 /path/to/sum.md5
`,
}, {
Name: "stickyimport",
Short: "Perform fast import of a SUM file",
Long: `Fill hash cache from a SUM file without verifying file fingerprints.
Usage Example:
rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
`,
}}
func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
if root == "" {
remoteFs, err := cache.Get(ctx, f.opt.Remote)
if err != nil {
return err
}
root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
}
op := &kvDump{
full: full,
root: root,
path: f.db.Path(),
fs: f,
}
err := f.db.Do(false, op)
if err == kv.ErrEmpty {
fs.Infof(op.path, "empty")
err = nil
}
return err
}
func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bool) error {
var hashType hash.Type
if err := hashType.Set(hashName); err != nil {
return err
}
if hashType == hash.None {
return errors.New("please provide a valid hash type")
}
if !f.suppHashes.Contains(hashType) {
return errors.New("unsupported hash type")
}
if !f.keepHashes.Contains(hashType) {
fs.Infof(nil, "Need not import hashes of this type")
return nil
}
_, sumPath, err := fspath.SplitFs(sumRemote)
if err != nil {
return err
}
sumFs, err := cache.Get(ctx, sumRemote)
switch err {
case fs.ErrorIsFile:
// ok
case nil:
return fmt.Errorf("not a file: %s", sumRemote)
default:
return err
}
sumObj, err := sumFs.NewObject(ctx, path.Base(sumPath))
if err != nil {
return fmt.Errorf("cannot open sum file: %w", err)
}
hashes, err := operations.ParseSumFile(ctx, sumObj)
if err != nil {
return fmt.Errorf("failed to parse sum file: %w", err)
}
if sticky {
rootPath := f.Fs.Root()
for remote, hashVal := range hashes {
key := path.Join(rootPath, remote)
hashSums := operations.HashSums{hashName: hashVal}
if err := f.putRawHashes(ctx, key, anyFingerprint, hashSums); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
}
}
fs.Infof(nil, "Summary: %d checksum(s) imported", len(hashes))
return nil
}
const longImportThreshold = 100
if len(hashes) > longImportThreshold {
fs.Infof(nil, "Importing %d checksums. Please wait...", len(hashes))
}
doneCount := 0
err = operations.ListFn(ctx, f, func(obj fs.Object) {
remote := obj.Remote()
hash := hashes[remote]
hashes[remote] = "" // mark as handled
o, ok := obj.(*Object)
if ok && hash != "" {
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
}
accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
doneCount++
}
})
if err != nil {
fs.Errorf(nil, "Import failed: %v", err)
}
skipCount := 0
for remote, emptyOrDone := range hashes {
if emptyOrDone != "" {
fs.Infof(nil, "Skip vanished object: %s", remote)
skipCount++
}
}
fs.Infof(nil, "Summary: %d imported, %d skipped", doneCount, skipCount)
return err
}
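For context, the sum file consumed by the import and stickyimport commands above is expected in the usual rclone hashsum / GNU coreutils layout — hex digest, two spaces, path relative to the remote. The values below are purely illustrative:

0cc175b9c0f1b6a831c399e269772661  subdir/file1.txt
92eb5ffee6ae2fec3ad71c777531578f  subdir/file2.txt

A file in this form is what operations.ParseSumFile turns into the remote-to-hash map used above.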

backend/hasher/hasher.go (new file)

@@ -0,0 +1,516 @@
// Package hasher implements a checksum handling overlay backend
package hasher
import (
"context"
"encoding/gob"
"errors"
"fmt"
"io"
"path"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/kv"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "hasher",
Description: "Better checksums for other remotes",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Required: true,
Help: "Remote to cache checksums for (e.g. myRemote:path).",
}, {
Name: "hashes",
Default: fs.CommaSepList{"md5", "sha1"},
Advanced: false,
Help: "Comma separated list of supported checksum types.",
}, {
Name: "max_age",
Advanced: false,
Default: fs.DurationOff,
Help: "Maximum time to keep checksums in cache (0 = no cache, off = cache forever).",
}, {
Name: "auto_size",
Advanced: true,
Default: fs.SizeSuffix(0),
Help: "Auto-update checksum for files smaller than this size (disabled by default).",
}},
})
}
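For reference, a hasher remote built from the options registered above could be configured with a stanza along these lines (an illustrative sketch; the remote name and wrapped path are invented):

[hashed]
type = hasher
remote = myRemote:path
hashes = md5,sha1
max_age = off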
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
Hashes fs.CommaSepList `config:"hashes"`
AutoSize fs.SizeSuffix `config:"auto_size"`
MaxAge fs.Duration `config:"max_age"`
}
// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
name string
root string
wrapper fs.Fs
features *fs.Features
opt *Options
db *kv.DB
// fingerprinting
fpTime bool // true if using time in fingerprints
fpHash hash.Type // hash type to use in fingerprints or None
// hash types triaged by groups
suppHashes hash.Set // all supported checksum types
passHashes hash.Set // passed directly to the base without caching
slowHashes hash.Set // passed to the base and then cached
autoHashes hash.Set // calculated in-house and cached
keepHashes hash.Set // checksums to keep in cache (slow + auto)
}
var warnExperimental sync.Once
// NewFs constructs an Fs from the remote:path string
func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs.Fs, error) {
if !kv.Supported() {
return nil, errors.New("hasher is not supported on this OS")
}
warnExperimental.Do(func() {
fs.Infof(nil, "Hasher is EXPERIMENTAL!")
})
opt := &Options{}
err := configstruct.Set(cmap, opt)
if err != nil {
return nil, err
}
if strings.HasPrefix(opt.Remote, fsname+":") {
return nil, errors.New("can't point remote at itself")
}
remotePath := fspath.JoinRootPath(opt.Remote, rpath)
baseFs, err := cache.Get(ctx, remotePath)
if err != nil && err != fs.ErrorIsFile {
return nil, fmt.Errorf("failed to derive base remote %q: %w", opt.Remote, err)
}
f := &Fs{
Fs: baseFs,
name: fsname,
root: rpath,
opt: opt,
}
baseFeatures := baseFs.Features()
f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported
if baseFeatures.SlowHash {
f.slowHashes = f.Fs.Hashes()
} else {
f.passHashes = f.Fs.Hashes()
f.fpHash = f.passHashes.GetOne()
}
f.suppHashes = f.passHashes
f.suppHashes.Add(f.slowHashes.Array()...)
for _, hashName := range opt.Hashes {
var ht hash.Type
if err := ht.Set(hashName); err != nil {
return nil, fmt.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
}
if !f.slowHashes.Contains(ht) {
f.autoHashes.Add(ht)
}
f.keepHashes.Add(ht)
f.suppHashes.Add(ht)
}
fs.Debugf(f, "Groups by usage: cached %s, passed %s, auto %s, slow %s, supported %s",
f.keepHashes, f.passHashes, f.autoHashes, f.slowHashes, f.suppHashes)
var nilSet hash.Set
if f.keepHashes == nilSet {
return nil, errors.New("configured hash_names have nothing to keep in cache")
}
if f.opt.MaxAge > 0 {
gob.Register(hashRecord{})
db, err := kv.Start(ctx, "hasher", f.Fs)
if err != nil {
return nil, err
}
f.db = db
}
stubFeatures := &fs.Features{
CanHaveEmptyDirectories: true,
IsLocal: true,
ReadMimeType: true,
WriteMimeType: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
cache.PinUntilFinalized(f.Fs, f)
return f, err
}
//
// Filesystem
//
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { return f.suppHashes }
// String returns a description of the FS
// The "hasher::" prefix is a distinctive feature.
func (f *Fs) String() string {
return fmt.Sprintf("hasher::%s:%s", f.name, f.root)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs { return f.Fs }
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs { return f.wrapper }
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper }
// Wrap base entries into hasher entries.
func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries, err error) {
hashEntries = baseEntries[:0] // work inplace
for _, entry := range baseEntries {
switch x := entry.(type) {
case fs.Object:
obj, err := f.wrapObject(x, nil)
if err != nil {
return nil, err
}
hashEntries = append(hashEntries, obj)
default:
hashEntries = append(hashEntries, entry) // trash in - trash out
}
}
return hashEntries, nil
}
// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if entries, err = f.Fs.List(ctx, dir); err != nil {
return nil, err
}
return f.wrapEntries(entries)
}
// ListR lists the objects and directories recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
return f.Fs.Features().ListR(ctx, dir, func(baseEntries fs.DirEntries) error {
hashEntries, err := f.wrapEntries(baseEntries)
if err != nil {
return err
}
return callback(hashEntries)
})
}
// Purge a directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
if do := f.Fs.Features().Purge; do != nil {
if err := do(ctx, dir); err != nil {
return err
}
err := f.db.Do(true, &kvPurge{
dir: path.Join(f.Fs.Root(), dir),
})
if err != nil {
fs.Errorf(f, "Failed to purge some hashes: %v", err)
}
return nil
}
return fs.ErrorCantPurge
}
// PutStream uploads to the remote path with undeterminate size.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if do := f.Fs.Features().PutStream; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err)
}
return nil, errors.New("PutStream not supported")
}
// PutUnchecked uploads the object, allowing duplicates.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if do := f.Fs.Features().PutUnchecked; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err)
}
return nil, errors.New("PutUnchecked not supported")
}
// pruneHash deletes hash for a path
func (f *Fs) pruneHash(remote string) error {
return f.db.Do(true, &kvPrune{
key: path.Join(f.Fs.Root(), remote),
})
}
// CleanUp the trash in the Fs
func (f *Fs) CleanUp(ctx context.Context) error {
if do := f.Fs.Features().CleanUp; do != nil {
return do(ctx)
}
return errors.New("CleanUp not supported")
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if do := f.Fs.Features().About; do != nil {
return do(ctx)
}
return nil, errors.New("About not supported")
}
// ChangeNotify calls the passed function with a path that has had changes.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
if do := f.Fs.Features().ChangeNotify; do != nil {
do(ctx, notifyFunc, pollIntervalChan)
}
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
if do := f.Fs.Features().UserInfo; do != nil {
return do(ctx)
}
return nil, fs.ErrorNotImplemented
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
if do := f.Fs.Features().Disconnect; do != nil {
return do(ctx)
}
return fs.ErrorNotImplemented
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if do := f.Fs.Features().MergeDirs; do != nil {
return do(ctx, dirs)
}
return errors.New("MergeDirs not supported")
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
if do := f.Fs.Features().DirCacheFlush; do != nil {
do()
}
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
if do := f.Fs.Features().PublicLink; do != nil {
return do(ctx, remote, expire, unlink)
}
return "", errors.New("PublicLink not supported")
}
// Copy src to this remote using server-side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
o, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantCopy
}
oResult, err := do(ctx, o.Object, remote)
return f.wrapObject(oResult, err)
}
// Move src to this remote using server-side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
}
o, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantMove
}
oResult, err := do(ctx, o.Object, remote)
if err != nil {
return nil, err
}
_ = f.db.Do(true, &kvMove{
src: path.Join(f.Fs.Root(), src.Remote()),
dst: path.Join(f.Fs.Root(), remote),
dir: false,
fs: f,
})
return f.wrapObject(oResult, nil)
}
// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
do := f.Fs.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
srcFs, ok := src.(*Fs)
if !ok {
return fs.ErrorCantDirMove
}
err := do(ctx, srcFs.Fs, srcRemote, dstRemote)
if err == nil {
_ = f.db.Do(true, &kvMove{
src: path.Join(srcFs.Fs.Root(), srcRemote),
dst: path.Join(f.Fs.Root(), dstRemote),
dir: true,
fs: f,
})
}
return err
}
// Shutdown the backend, closing any background tasks and any cached connections.
func (f *Fs) Shutdown(ctx context.Context) (err error) {
err = f.db.Stop(false)
if do := f.Fs.Features().Shutdown; do != nil {
if err2 := do(ctx); err2 != nil {
err = err2
}
}
return
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(ctx, remote)
return f.wrapObject(o, err)
}
//
// Object
//
// Object represents a composite file wrapping one or more data chunks
type Object struct {
fs.Object
f *Fs
}
// Wrap base object into hasher object
func (f *Fs) wrapObject(o fs.Object, err error) (obj fs.Object, outErr error) {
// log.Trace(o, "err=%v", err)("obj=%#v, outErr=%v", &obj, &outErr)
if err != nil {
return nil, err
}
if o == nil {
return nil, fs.ErrorObjectNotFound
}
return &Object{Object: o, f: f}, nil
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info { return o.f }
// UnWrap returns the wrapped Object
func (o *Object) UnWrap() fs.Object { return o.Object }
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Object.String()
}
// ID returns the ID of the Object if possible
func (o *Object) ID() string {
if doer, ok := o.Object.(fs.IDer); ok {
return doer.ID()
}
return ""
}
// GetTier returns the Tier of the Object if possible
func (o *Object) GetTier() string {
if doer, ok := o.Object.(fs.GetTierer); ok {
return doer.GetTier()
}
return ""
}
// SetTier set the Tier of the Object if possible
func (o *Object) SetTier(tier string) error {
if doer, ok := o.Object.(fs.SetTierer); ok {
return doer.SetTier(tier)
}
return errors.New("SetTier not supported")
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
if doer, ok := o.Object.(fs.MimeTyper); ok {
return doer.MimeType(ctx)
}
return ""
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)


@@ -0,0 +1,78 @@
package hasher
import (
"context"
"fmt"
"os"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/kv"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: name, ModTime: mtime1}
_, o := fstests.PutTestContents(ctx, t, f, &item, data, true)
require.NotNil(t, o)
return o
}
func (f *Fs) testUploadFromCrypt(t *testing.T) {
// make a temporary local remote
tempRoot, err := fstest.LocalRemote()
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempRoot)
}()
// make a temporary crypt remote
ctx := context.Background()
pass := obscure.MustObscure("crypt")
remote := fmt.Sprintf(":crypt,remote=%s,password=%s:", tempRoot, pass)
cryptFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)
// make a test file on the crypt remote
const dirName = "from_crypt_1"
const fileName = dirName + "/file_from_crypt_1"
const longTime = fs.ModTimeNotSupported
src := putFile(ctx, t, cryptFs, fileName, "doggy froggy")
// ensure that hash does not exist yet
_ = f.pruneHash(fileName)
hashType := f.keepHashes.GetOne()
hash, err := f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
assert.Error(t, err)
assert.Empty(t, hash)
// upload file to hasher
in, err := src.Open(ctx)
require.NoError(t, err)
dst, err := f.Put(ctx, in, src)
require.NoError(t, err)
assert.NotNil(t, dst)
// check that hash was created
hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
assert.NoError(t, err)
assert.NotEmpty(t, hash)
//t.Logf("hash is %q", hash)
_ = operations.Purge(ctx, f, dirName)
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
if !kv.Supported() {
t.Skip("hasher is not supported on this OS")
}
t.Run("UploadFromCrypt", f.testUploadFromCrypt)
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -0,0 +1,38 @@
package hasher_test
import (
"os"
"path/filepath"
"testing"
"github.com/rclone/rclone/backend/hasher"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/kv"
_ "github.com/rclone/rclone/backend/all" // for integration tests
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if !kv.Supported() {
t.Skip("hasher is not supported on this OS")
}
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*hasher.Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
},
UnimplementableObjectMethods: []string{},
}
if *fstest.RemoteName == "" {
tempDir := filepath.Join(os.TempDir(), "rclone-hasher-test")
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: "TestHasher", Key: "type", Value: "hasher"},
{Name: "TestHasher", Key: "remote", Value: tempDir},
}
opt.RemoteName = "TestHasher:"
}
fstests.Run(t, &opt)
}
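As a usage note, this integration test can be pointed at a configured remote in the usual fstest way, e.g. go test ./backend/hasher -remote TestHasher: (treat the exact flag spelling as an assumption based on rclone's test conventions); with no remote given it falls back to the local temp-dir configuration built above.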

backend/hasher/kv.go (new file)

@@ -0,0 +1,315 @@
package hasher
import (
"bytes"
"context"
"encoding/gob"
"errors"
"fmt"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/kv"
)
const (
timeFormat = "2006-01-02T15:04:05.000000000-0700"
anyFingerprint = "*"
)
type hashMap map[hash.Type]string
type hashRecord struct {
Fp string // fingerprint
Hashes operations.HashSums
Created time.Time
}
func (r *hashRecord) encode(key string) ([]byte, error) {
var buf bytes.Buffer
if err := gob.NewEncoder(&buf).Encode(r); err != nil {
fs.Debugf(key, "hasher encoding %v: %v", r, err)
return nil, err
}
return buf.Bytes(), nil
}
func (r *hashRecord) decode(key string, data []byte) error {
if err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(r); err != nil {
fs.Debugf(key, "hasher decoding %q failed: %v", data, err)
return err
}
return nil
}
// kvPrune: prune a single hash
type kvPrune struct {
key string
}
func (op *kvPrune) Do(ctx context.Context, b kv.Bucket) error {
return b.Delete([]byte(op.key))
}
// kvPurge: delete a subtree
type kvPurge struct {
dir string
}
func (op *kvPurge) Do(ctx context.Context, b kv.Bucket) error {
dir := op.dir
if !strings.HasSuffix(dir, "/") {
dir += "/"
}
var items []string
cur := b.Cursor()
bkey, _ := cur.Seek([]byte(dir))
for bkey != nil {
key := string(bkey)
if !strings.HasPrefix(key, dir) {
break
}
items = append(items, key[len(dir):])
bkey, _ = cur.Next()
}
nerr := 0
for _, sub := range items {
if err := b.Delete([]byte(dir + sub)); err != nil {
nerr++
}
}
fs.Debugf(dir, "%d hashes purged, %d failed", len(items)-nerr, nerr)
return nil
}
// kvMove: assign hashes to new path
type kvMove struct {
src string
dst string
dir bool
fs *Fs
}
func (op *kvMove) Do(ctx context.Context, b kv.Bucket) error {
src, dst := op.src, op.dst
if !op.dir {
err := moveHash(b, src, dst)
fs.Debugf(op.fs, "moving cached hash %s to %s (err: %v)", src, dst, err)
return err
}
if !strings.HasSuffix(src, "/") {
src += "/"
}
if !strings.HasSuffix(dst, "/") {
dst += "/"
}
var items []string
cur := b.Cursor()
bkey, _ := cur.Seek([]byte(src))
for bkey != nil {
key := string(bkey)
if !strings.HasPrefix(key, src) {
break
}
items = append(items, key[len(src):])
bkey, _ = cur.Next()
}
nerr := 0
for _, suffix := range items {
srcKey, dstKey := src+suffix, dst+suffix
err := moveHash(b, srcKey, dstKey)
fs.Debugf(op.fs, "Rename cache record %s -> %s (err: %v)", srcKey, dstKey, err)
if err != nil {
nerr++
}
}
fs.Debugf(op.fs, "%d hashes moved, %d failed", len(items)-nerr, nerr)
return nil
}
func moveHash(b kv.Bucket, src, dst string) error {
data := b.Get([]byte(src))
err := b.Delete([]byte(src))
if err != nil || len(data) == 0 {
return err
}
return b.Put([]byte(dst), data)
}
// kvGet: get single hash from database
type kvGet struct {
key string
fp string
hash string
val string
age time.Duration
}
func (op *kvGet) Do(ctx context.Context, b kv.Bucket) error {
data := b.Get([]byte(op.key))
if len(data) == 0 {
return errors.New("no record")
}
var r hashRecord
if err := r.decode(op.key, data); err != nil {
return errors.New("invalid record")
}
if !(r.Fp == anyFingerprint || op.fp == anyFingerprint || r.Fp == op.fp) {
return errors.New("fingerprint changed")
}
if time.Since(r.Created) > op.age {
return errors.New("record timed out")
}
if r.Hashes != nil {
op.val = r.Hashes[op.hash]
}
return nil
}
// kvPut: set hashes for an object by key
type kvPut struct {
key string
fp string
hashes operations.HashSums
age time.Duration
}
func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
data := b.Get([]byte(op.key))
var r hashRecord
if len(data) > 0 {
err = r.decode(op.key, data)
if err != nil || r.Fp != op.fp || time.Since(r.Created) > op.age {
r.Hashes = nil
}
}
if len(r.Hashes) == 0 {
r.Created = time.Now()
r.Hashes = operations.HashSums{}
r.Fp = op.fp
}
for hashType, hashVal := range op.hashes {
r.Hashes[hashType] = hashVal
}
if data, err = r.encode(op.key); err != nil {
return fmt.Errorf("marshal failed: %w", err)
}
if err = b.Put([]byte(op.key), data); err != nil {
return fmt.Errorf("put failed: %w", err)
}
return err
}
// kvDump: dump the database.
// Note: long dump can cause concurrent operations to fail.
type kvDump struct {
full bool
root string
path string
fs *Fs
num int
total int
}
func (op *kvDump) Do(ctx context.Context, b kv.Bucket) error {
f, baseRoot, dbPath := op.fs, op.root, op.path
if op.full {
total := 0
num := 0
_ = b.ForEach(func(bkey, data []byte) error {
total++
key := string(bkey)
include := (baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/"))
var r hashRecord
if err := r.decode(key, data); err != nil {
fs.Errorf(nil, "%s: invalid record: %v", key, err)
return nil
}
fmt.Println(f.dumpLine(&r, key, include, nil))
if include {
num++
}
return nil
})
fs.Infof(dbPath, "%d records out of %d", num, total)
op.num, op.total = num, total // for unit tests
return nil
}
num := 0
cur := b.Cursor()
var bkey, data []byte
if baseRoot != "" {
bkey, data = cur.Seek([]byte(baseRoot))
} else {
bkey, data = cur.First()
}
for bkey != nil {
key := string(bkey)
if !(baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/")) {
break
}
var r hashRecord
if err := r.decode(key, data); err != nil {
fs.Errorf(nil, "%s: invalid record: %v", key, err)
continue
}
if key = strings.TrimPrefix(key[len(baseRoot):], "/"); key == "" {
key = "/"
}
fmt.Println(f.dumpLine(&r, key, true, nil))
num++
bkey, data = cur.Next()
}
fs.Infof(dbPath, "%d records", num)
op.num = num // for unit tests
return nil
}
func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) string {
var status string
switch {
case !include:
status = "ext"
case err != nil:
status = "bad"
case r.Fp == anyFingerprint:
status = "stk"
default:
status = "ok "
}
var hashes []string
for _, hashType := range f.keepHashes.Array() {
hashName := hashType.String()
hashVal := r.Hashes[hashName]
if hashVal == "" || err != nil {
hashVal = "-"
}
hashVal = fmt.Sprintf("%-*s", hash.Width(hashType, false), hashVal)
hashes = append(hashes, hashName+":"+hashVal)
}
hashesStr := strings.Join(hashes, " ")
age := time.Since(r.Created).Round(time.Second)
if age > 24*time.Hour {
age = age.Round(time.Hour)
}
if err != nil {
age = 0
}
ageStr := age.String()
if strings.HasSuffix(ageStr, "h0m0s") {
ageStr = strings.TrimSuffix(ageStr, "0m0s")
}
return fmt.Sprintf("%s %s %9s %s", status, hashesStr, ageStr, path)
}
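To make the dump format above concrete: with md5 and sha1 cached, a single record printed by dumpLine would look roughly like this (illustrative digests, md5 padded to 32 and sha1 to 40 characters, age right-aligned in 9 columns):

ok  md5:0cc175b9c0f1b6a831c399e269772661 sha1:86f7e437faa5a7fce15d1ddcb9eaeaea377667b8       10h subdir/file1.txt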

backend/hasher/object.go (new file)

@@ -0,0 +1,305 @@
package hasher
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"path"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
)
// obtain hash for an object
func (o *Object) getHash(ctx context.Context, hashType hash.Type) (string, error) {
maxAge := time.Duration(o.f.opt.MaxAge)
if maxAge <= 0 {
return "", nil
}
fp := o.fingerprint(ctx)
if fp == "" {
return "", errors.New("fingerprint failed")
}
return o.f.getRawHash(ctx, hashType, o.Remote(), fp, maxAge)
}
// obtain hash for a path
func (f *Fs) getRawHash(ctx context.Context, hashType hash.Type, remote, fp string, age time.Duration) (string, error) {
key := path.Join(f.Fs.Root(), remote)
op := &kvGet{
key: key,
fp: fp,
hash: hashType.String(),
age: age,
}
err := f.db.Do(false, op)
return op.val, err
}
// put new hashes for an object
func (o *Object) putHashes(ctx context.Context, rawHashes hashMap) error {
if o.f.opt.MaxAge <= 0 {
return nil
}
fp := o.fingerprint(ctx)
if fp == "" {
return nil
}
key := path.Join(o.f.Fs.Root(), o.Remote())
hashes := operations.HashSums{}
for hashType, hashVal := range rawHashes {
hashes[hashType.String()] = hashVal
}
return o.f.putRawHashes(ctx, key, fp, hashes)
}
// set hashes for a path without any validation
func (f *Fs) putRawHashes(ctx context.Context, key, fp string, hashes operations.HashSums) error {
return f.db.Do(true, &kvPut{
key: key,
fp: fp,
hashes: hashes,
age: time.Duration(f.opt.MaxAge),
})
}
// Hash returns the selected checksum of the file or "" if unavailable.
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string, err error) {
f := o.f
if f.passHashes.Contains(hashType) {
fs.Debugf(o, "pass %s", hashType)
return o.Object.Hash(ctx, hashType)
}
if !f.suppHashes.Contains(hashType) {
fs.Debugf(o, "unsupp %s", hashType)
return "", hash.ErrUnsupported
}
if hashVal, err = o.getHash(ctx, hashType); err != nil {
fs.Debugf(o, "getHash: %v", err)
err = nil
hashVal = ""
}
if hashVal != "" {
fs.Debugf(o, "cached %s = %q", hashType, hashVal)
return hashVal, nil
}
if f.slowHashes.Contains(hashType) {
fs.Debugf(o, "slow %s", hashType)
hashVal, err = o.Object.Hash(ctx, hashType)
if err == nil && hashVal != "" && f.keepHashes.Contains(hashType) {
if err = o.putHashes(ctx, hashMap{hashType: hashVal}); err != nil {
fs.Debugf(o, "putHashes: %v", err)
err = nil
}
}
return hashVal, err
}
if f.autoHashes.Contains(hashType) && o.Size() < int64(f.opt.AutoSize) {
_ = o.updateHashes(ctx)
if hashVal, err = o.getHash(ctx, hashType); err != nil {
fs.Debugf(o, "auto %s = %q (%v)", hashType, hashVal, err)
err = nil
}
}
return hashVal, err
}
// updateHashes performs implicit "rclone hashsum --download" and updates cache.
func (o *Object) updateHashes(ctx context.Context) error {
r, err := o.Open(ctx)
if err != nil {
fs.Infof(o, "update failed (open): %v", err)
return err
}
defer func() {
_ = r.Close()
}()
if _, err = io.Copy(ioutil.Discard, r); err != nil {
fs.Infof(o, "update failed (copy): %v", err)
return err
}
return nil
}
// Update the object with the given data, time and size.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
_ = o.f.pruneHash(src.Remote())
return o.Object.Update(ctx, in, src, options...)
}
// Remove an object.
func (o *Object) Remove(ctx context.Context) error {
_ = o.f.pruneHash(o.Remote())
return o.Object.Remove(ctx)
}
// SetModTime sets the modification time of the file.
// Also prunes the cache entry when modtime changes so that
// touching a file will trigger checksum recalculation even
// on backends that don't provide modTime with fingerprint.
func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
if mtime != o.Object.ModTime(ctx) {
_ = o.f.pruneHash(o.Remote())
}
return o.Object.SetModTime(ctx, mtime)
}
// Open opens the file for read.
// Full reads will also update object hashes.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadCloser, err error) {
size := o.Size()
var offset, limit int64 = 0, -1
for _, option := range options {
switch opt := option.(type) {
case *fs.SeekOption:
offset = opt.Offset
case *fs.RangeOption:
offset, limit = opt.Decode(size)
}
}
if offset < 0 {
return nil, errors.New("invalid offset")
}
if limit < 0 {
limit = size - offset
}
if r, err = o.Object.Open(ctx, options...); err != nil {
return nil, err
}
if offset != 0 || limit < size {
// It's a partial read
return r, err
}
return o.f.newHashingReader(ctx, r, func(sums hashMap) {
if err := o.putHashes(ctx, sums); err != nil {
fs.Infof(o, "auto hashing error: %v", err)
}
})
}
// Put data into the remote path with given modTime and size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
var (
o fs.Object
common hash.Set
rehash bool
hashes hashMap
)
if fsrc := src.Fs(); fsrc != nil {
common = fsrc.Hashes().Overlap(f.keepHashes)
// Rehash if source does not have all required hashes or hashing is slow
rehash = fsrc.Features().SlowHash || common != f.keepHashes
}
wrapIn := in
if rehash {
r, err := f.newHashingReader(ctx, in, func(sums hashMap) {
hashes = sums
})
fs.Debugf(src, "Rehash in-fly due to incomplete or slow source set %v (err: %v)", common, err)
if err == nil {
wrapIn = r
} else {
rehash = false
}
}
_ = f.pruneHash(src.Remote())
oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
o, err = f.wrapObject(oResult, err)
if err != nil {
return nil, err
}
if !rehash {
hashes = hashMap{}
for _, ht := range common.Array() {
if h, e := src.Hash(ctx, ht); e == nil && h != "" {
hashes[ht] = h
}
}
}
if len(hashes) > 0 {
err := o.(*Object).putHashes(ctx, hashes)
fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
}
return o, err
}
type hashingReader struct {
rd io.Reader
hasher *hash.MultiHasher
fun func(hashMap)
}
func (f *Fs) newHashingReader(ctx context.Context, rd io.Reader, fun func(hashMap)) (*hashingReader, error) {
hasher, err := hash.NewMultiHasherTypes(f.keepHashes)
if err != nil {
return nil, err
}
hr := &hashingReader{
rd: rd,
hasher: hasher,
fun: fun,
}
return hr, nil
}
func (r *hashingReader) Read(p []byte) (n int, err error) {
n, err = r.rd.Read(p)
if err != nil && err != io.EOF {
r.hasher = nil
}
if r.hasher != nil {
if _, errHash := r.hasher.Write(p[:n]); errHash != nil {
r.hasher = nil
err = errHash
}
}
if err == io.EOF && r.hasher != nil {
r.fun(r.hasher.Sums())
r.hasher = nil
}
return
}
func (r *hashingReader) Close() error {
if rc, ok := r.rd.(io.ReadCloser); ok {
return rc.Close()
}
return nil
}
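The hashingReader above is rclone-specific, but the underlying pattern — computing checksums as a side effect of the consumer draining the stream — can be sketched with the standard library alone. A simplified standalone illustration, not the rclone implementation:

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("example payload")

	// TeeReader copies every byte read from src into the hash,
	// so the digest is complete once the consumer hits EOF.
	h := md5.New()
	tee := io.TeeReader(src, h)

	n, err := io.Copy(io.Discard, tee) // stand-in for the real consumer
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes, md5 = %x\n", n, h.Sum(nil))
}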
// Return object fingerprint or empty string in case of errors
//
// Note that we can't use the generic `fs.Fingerprint` here because
// this fingerprint is used to pick _derived hashes_ that are slow
// to calculate or completely unsupported by the base remote.
//
// The hasher fingerprint must be based on `fsHash`, the first _fast_
// hash supported _by the underlying remote_ (if there is one),
// while `fs.Fingerprint` would select a hash _produced by hasher_
// creating unresolvable fingerprint loop.
func (o *Object) fingerprint(ctx context.Context) string {
size := o.Object.Size()
timeStr := "-"
if o.f.fpTime {
timeStr = o.Object.ModTime(ctx).UTC().Format(timeFormat)
if timeStr == "" {
return ""
}
}
hashStr := "-"
if o.f.fpHash != hash.None {
var err error
hashStr, err = o.Object.Hash(ctx, o.f.fpHash)
if hashStr == "" || err != nil {
return ""
}
}
return fmt.Sprintf("%d,%s,%s", size, timeStr, hashStr)
}
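Given the format string above, a fingerprint for a 12-byte object on a base remote that reports modtimes and a fast md5 would look something like 12,2001-02-03T04:05:06.499999999+0000,0cc175b9c0f1b6a831c399e269772661 (size, modtime rendered with timeFormat, then the base remote's fast hash; the digest here is illustrative). When the base lacks modtime support or a fast hash, the corresponding field stays "-".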


@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9
package hdfs
@@ -262,6 +263,98 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.client.RemoveAll(realpath)
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Get the real paths from the remote specs:
sourcePath := srcObj.fs.realpath(srcObj.remote)
targetPath := f.realpath(remote)
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
// Make sure the target folder exists:
dirname := path.Dir(targetPath)
err := f.client.MkdirAll(dirname, 0755)
if err != nil {
return nil, err
}
// Do the move
// Note that the underlying HDFS library hard-codes Overwrite=True, but this is expected rclone behaviour.
err = f.client.Rename(sourcePath, targetPath)
if err != nil {
return nil, err
}
// Look up the resulting object
info, err := f.client.Stat(targetPath)
if err != nil {
return nil, err
}
// And return it:
return &Object{
fs: f,
remote: remote,
size: info.Size(),
modTime: info.ModTime(),
}, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
srcFs, ok := src.(*Fs)
if !ok {
return fs.ErrorCantDirMove
}
// Get the real paths from the remote specs:
sourcePath := srcFs.realpath(srcRemote)
targetPath := f.realpath(dstRemote)
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
// Check if the destination exists:
info, err := f.client.Stat(targetPath)
if err == nil {
fs.Debugf(f, "target directory already exits, IsDir = [%t]", info.IsDir())
return fs.ErrorDirExists
}
// Make sure the targets parent folder exists:
dirname := path.Dir(targetPath)
err = f.client.MkdirAll(dirname, 0755)
if err != nil {
return err
}
// Do the move
err = f.client.Rename(sourcePath, targetPath)
if err != nil {
return err
}
return nil
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := f.client.StatFs()
@@ -317,4 +410,6 @@ var (
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
)


@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9
package hdfs
@@ -18,41 +19,31 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "namenode",
Help: "hadoop name node and port",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
Examples: []fs.OptionExample{{
Value: "namenode:8020",
Help: "Connect to host namenode at port 8020",
}},
}, {
Name: "username",
Help: "hadoop user name",
Required: false,
Name: "username",
Help: "Hadoop user name.",
Examples: []fs.OptionExample{{
Value: "root",
Help: "Connect to hdfs as root",
Help: "Connect to hdfs as root.",
}},
}, {
Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode
Help: `Kerberos service principal name for the namenode.
Enables KERBEROS authentication. Specifies the Service Principal Name
(<SERVICE>/<FQDN>) for the namenode.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "hdfs/namenode.hadoop.docker",
Help: "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
}},
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Advanced: true,
}, {
Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy
Help: `Kerberos data transfer protection: authentication|integrity|privacy.
Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.",


@@ -1,5 +1,6 @@
// Test HDFS filesystem interface
//go:build !plan9
// +build !plan9
package hdfs_test


@@ -1,6 +1,7 @@
// Build for hdfs for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9
// +build plan9
package hdfs


@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9
package hdfs


@@ -6,6 +6,8 @@ package http
import (
"context"
"errors"
"fmt"
"io"
"mime"
"net/http"
@@ -16,7 +18,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -38,33 +39,25 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "url",
Help: "URL of http host to connect to",
Help: "URL of http host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
Required: true,
Examples: []fs.OptionExample{{
Value: "https://example.com",
Help: "Connect to example.com",
}, {
Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password",
}},
}, {
Name: "headers",
Help: `Set HTTP headers for all transactions
Help: `Set HTTP headers for all transactions.
Use this to set additional HTTP headers for all transactions
Use this to set additional HTTP headers for all transactions.
The input format is comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.`,
Default: fs.CommaSepList{},
Advanced: true,
}, {
Name: "no_slash",
Help: `Set this if the site doesn't end directories with /
Help: `Set this if the site doesn't end directories with /.
Use this if your target website does not use / on the end of
directories.
@@ -80,8 +73,9 @@ directories.`,
Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests to find file sizes in dir listing
Help: `Don't use HEAD requests.
HEAD requests are mainly used to find file sizes in dir listing.
If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:
@@ -90,12 +84,9 @@ directory listing to:
- check it really exists
- check to see if it is a directory
If you set this option, rclone will not do the HEAD request. This will mean
- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
If you set this option, rclone will not do the HEAD request. This will mean
that directory listings are much quicker, but rclone won't have the times or
sizes of any files, and some files that don't exist may be in the listing.`,
Default: false,
Advanced: true,
}},
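A minimal sketch of the comma separated key,value format described in the headers option help above. applyHeaderPairs is a hypothetical helper used only for illustration, not the backend's actual addHeaders function:

// Hypothetical helper (illustration only): apply a flat list such as
// []string{"Cookie", "name=value", "Authorization", "xxx"} to a request.
func applyHeaderPairs(req *http.Request, pairs []string) {
	for i := 0; i+1 < len(pairs); i += 2 {
		req.Header.Set(pairs[i], pairs[i+1])
	}
}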
@@ -139,11 +130,87 @@ func statusError(res *http.Response, err error) error {
}
if res.StatusCode < 200 || res.StatusCode > 299 {
_ = res.Body.Close()
return errors.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
return fmt.Errorf("HTTP Error: %s", res.Status)
}
return nil
}
// getFsEndpoint decides if url is to be considered a file or directory,
// and returns a proper endpoint url to use for the fs.
func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Options) (string, bool) {
// If url ends with '/' it is already a proper url always assumed to be a directory.
if url[len(url)-1] == '/' {
return url, false
}
// If url does not end with '/' we send a HEAD request to decide
// if it is directory or file, and if directory appends the missing
// '/', or if file returns the directory url to parent instead.
createFileResult := func() (string, bool) {
fs.Debugf(nil, "If path is a directory you must add a trailing '/'")
parent, _ := path.Split(url)
return parent, true
}
createDirResult := func() (string, bool) {
fs.Debugf(nil, "To avoid the initial HEAD request add a trailing '/' to the path")
return url + "/", false
}
// If HEAD requests are not allowed we just have to assume it is a file.
if opt.NoHead {
fs.Debugf(nil, "Assuming path is a file as --http-no-head is set")
return createFileResult()
}
// Use a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be created: %v", err)
return createFileResult()
}
addHeaders(req, opt)
res, err := noRedir.Do(req)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
return createFileResult()
}
if res.StatusCode == http.StatusNotFound {
fs.Debugf(nil, "Assuming path is a directory as HEAD response is it does not exist as a file (%s)", res.Status)
return createDirResult()
}
if res.StatusCode == http.StatusMovedPermanently ||
res.StatusCode == http.StatusFound ||
res.StatusCode == http.StatusSeeOther ||
res.StatusCode == http.StatusTemporaryRedirect ||
res.StatusCode == http.StatusPermanentRedirect {
redir := res.Header.Get("Location")
if redir != "" {
if redir[len(redir)-1] == '/' {
fs.Debugf(nil, "Assuming path is a directory as HEAD response is redirect (%s) to a path that ends with '/': %s", res.Status, redir)
return createDirResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) to a path that does not end with '/': %s", res.Status, redir)
return createFileResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) but no location header", res.Status)
return createFileResult()
}
if res.StatusCode < 200 || res.StatusCode > 299 {
// Example is 403 (http.StatusForbidden) for servers not allowing HEAD requests.
fs.Debugf(nil, "Assuming path is a file as HEAD response is an error (%s)", res.Status)
return createFileResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is success (%s)", res.Status)
return createFileResult()
}
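A minimal summary, in comment form, of the decisions getFsEndpoint makes above; it is consistent with the table-driven test added later in this diff:

// HEAD not allowed (--http-no-head)            -> file
// HEAD request could not be built or sent      -> file
// 404 Not Found                                -> directory (append "/")
// 301/302/303/307/308, Location ends with "/"  -> directory
// 301/302/303/307/308, otherwise               -> file (root becomes parent)
// any other non-2xx status (e.g. 403)          -> file
// 2xx success                                  -> file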
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -174,37 +241,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
client := fshttp.NewClient(ctx)
var isFile = false
if !strings.HasSuffix(u.String(), "/") {
// Make a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
// check to see if points to a file
req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
if err == nil {
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
if err == nil {
isFile = true
}
}
}
newRoot := u.String()
if isFile {
// Point to the parent if this is a file
newRoot, _ = path.Split(u.String())
} else {
if !strings.HasSuffix(newRoot, "/") {
newRoot += "/"
}
}
u, err = url.Parse(newRoot)
endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
fs.Debugf(nil, "Root: %s", endpoint)
u, err = url.Parse(endpoint)
if err != nil {
return nil, err
}
@@ -222,12 +261,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
if isFile {
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
if !strings.HasSuffix(f.endpointURL, "/") {
return nil, errors.New("internal error: url doesn't end with /")
}
return f, nil
}
@@ -303,7 +346,7 @@ func parseName(base *url.URL, name string) (string, error) {
}
// check it doesn't have URL parameters
uStr := u.String()
if strings.Index(uStr, "?") >= 0 {
if strings.Contains(uStr, "?") {
return "", errFoundQuestionMark
}
// check that this is going back to the same host and scheme
@@ -384,15 +427,15 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
URL := f.url(dir)
u, err := url.Parse(URL)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
return nil, fmt.Errorf("failed to readDir: %w", err)
}
if !strings.HasSuffix(URL, "/") {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
return nil, fmt.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
// Do the request
req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
return nil, fmt.Errorf("readDir failed: %w", err)
}
f.addHeaders(req)
res, err := f.httpClient.Do(req)
@@ -404,7 +447,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
}
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
return nil, fmt.Errorf("failed to readDir: %w", err)
}
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
@@ -412,10 +455,10 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
case "text/html":
names, err = parse(u, res.Body)
if err != nil {
return nil, errors.Wrap(err, "readDir")
return nil, fmt.Errorf("readDir: %w", err)
}
default:
return nil, errors.Errorf("Can't parse content type %q", contentType)
return nil, fmt.Errorf("can't parse content type %q", contentType)
}
return names, nil
}
@@ -435,7 +478,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
names, err := f.readDir(ctx, dir)
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
return nil, fmt.Errorf("error listing %q: %w", dir, err)
}
var (
entriesMu sync.Mutex // to protect entries
@@ -547,7 +590,7 @@ func (o *Object) stat(ctx context.Context) error {
url := o.url()
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
return fmt.Errorf("stat failed: %w", err)
}
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
@@ -556,7 +599,7 @@ func (o *Object) stat(ctx context.Context) error {
}
err = statusError(res, err)
if err != nil {
return errors.Wrap(err, "failed to stat")
return fmt.Errorf("failed to stat: %w", err)
}
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
@@ -569,7 +612,7 @@ func (o *Object) stat(ctx context.Context) error {
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
if err != nil {
return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
return fmt.Errorf("failed to parse Content-Type: %q: %w", o.contentType, err)
}
if mediaType == "text/html" {
return fs.ErrorNotAFile
@@ -595,7 +638,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
url := o.url()
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
return nil, fmt.Errorf("Open failed: %w", err)
}
// Add optional headers
@@ -608,7 +651,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
res, err := o.fs.httpClient.Do(req)
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
return nil, fmt.Errorf("Open failed: %w", err)
}
return res.Body, nil
}


@@ -8,8 +8,10 @@ import (
"net/http/httptest"
"net/url"
"os"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"testing"
"time"
@@ -24,10 +26,11 @@ import (
)
var (
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
lineEndSize = 1
)
// prepareServer the test server and return a function to tidy it up afterwards
@@ -35,6 +38,22 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// verify the file path is correct, and also check which line endings
// are used to get sizes right ("\n" except on Windows, but even there
// we may have "\n" or "\r\n" depending on git crlf setting)
fileList, err := ioutil.ReadDir(filesPath)
require.NoError(t, err)
require.Greater(t, len(fileList), 0)
for _, file := range fileList {
if !file.IsDir() {
data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
if strings.HasSuffix(string(data), "\r\n") {
lineEndSize = 2
}
break
}
}
// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
@@ -47,7 +66,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
ts := httptest.NewServer(handler)
// Configure the remote
configfile.LoadConfig(context.Background())
configfile.Install()
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true
@@ -91,7 +110,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
e = entries[1]
assert.Equal(t, "one%.txt", e.Remote())
assert.Equal(t, int64(6), e.Size())
assert.Equal(t, int64(5+lineEndSize), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
@@ -108,7 +127,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
_, ok = e.(fs.Directory)
assert.True(t, ok)
} else {
assert.Equal(t, int64(41), e.Size())
assert.Equal(t, int64(40+lineEndSize), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
@@ -141,7 +160,7 @@ func TestListSubDir(t *testing.T) {
e := entries[0]
assert.Equal(t, "three/underthree.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
assert.Equal(t, int64(8+lineEndSize), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
@@ -154,7 +173,7 @@ func TestNewObject(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "four/under four.txt", o.Remote())
assert.Equal(t, int64(9), o.Size())
assert.Equal(t, int64(8+lineEndSize), o.Size())
_, ok := o.(*Object)
assert.True(t, ok)
@@ -187,7 +206,11 @@ func TestOpen(t *testing.T) {
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "beetroot\n", string(data))
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}
// Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
@@ -236,7 +259,7 @@ func TestIsAFileSubDir(t *testing.T) {
e := entries[0]
assert.Equal(t, "underthree.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
assert.Equal(t, int64(8+lineEndSize), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
@@ -353,3 +376,106 @@ func TestParseCaddy(t *testing.T) {
"v1.36-22-g06ea13a-ssh-agentβ/",
})
}
func TestFsNoSlashRoots(t *testing.T) {
// Test Fs with roots that do not end with '/', exercising the logic that
// decides if url is to be considered a file or directory, based
// on result from a HEAD request.
// Handler for faking HEAD responses with different status codes
headCount := 0
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "HEAD" {
headCount++
responseCode, err := strconv.Atoi(path.Base(r.URL.String()))
require.NoError(t, err)
if strings.HasPrefix(r.URL.String(), "/redirect/") {
var redir string
if strings.HasPrefix(r.URL.String(), "/redirect/file/") {
redir = "/redirected"
} else if strings.HasPrefix(r.URL.String(), "/redirect/dir/") {
redir = "/redirected/"
} else {
require.Fail(t, "Redirect test requests must start with '/redirect/file/' or '/redirect/dir/'")
}
http.Redirect(w, r, redir, responseCode)
} else {
http.Error(w, http.StatusText(responseCode), responseCode)
}
}
})
// Make the test server
ts := httptest.NewServer(handler)
defer ts.Close()
// Configure the remote
configfile.Install()
m := configmap.Simple{
"type": "http",
"url": ts.URL,
}
// Test
for i, test := range []struct {
root string
isFile bool
}{
// 2xx success
{"parent/200", true},
{"parent/204", true},
// 3xx redirection Redirect status 301, 302, 303, 307, 308
{"redirect/file/301", true}, // Request is redirected to "/redirected"
{"redirect/dir/301", false}, // Request is redirected to "/redirected/"
{"redirect/file/302", true}, // Request is redirected to "/redirected"
{"redirect/dir/302", false}, // Request is redirected to "/redirected/"
{"redirect/file/303", true}, // Request is redirected to "/redirected"
{"redirect/dir/303", false}, // Request is redirected to "/redirected/"
{"redirect/file/304", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/305", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/306", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/307", true}, // Request is redirected to "/redirected"
{"redirect/dir/307", false}, // Request is redirected to "/redirected/"
{"redirect/file/308", true}, // Request is redirected to "/redirected"
{"redirect/dir/308", false}, // Request is redirected to "/redirected/"
// 4xx client errors
{"parent/403", true}, // Forbidden status (head request blocked)
{"parent/404", false}, // Not found status
} {
for _, noHead := range []bool{false, true} {
var isFile bool
if noHead {
m.Set("no_head", "true")
isFile = true
} else {
m.Set("no_head", "false")
isFile = test.isFile
}
headCount = 0
f, err := NewFs(context.Background(), remoteName, test.root, m)
if noHead {
assert.Equal(t, 0, headCount)
} else {
assert.Equal(t, 1, headCount)
}
if isFile {
assert.ErrorIs(t, err, fs.ErrorIsFile)
} else {
assert.NoError(t, err)
}
var endpoint string
if isFile {
parent, _ := path.Split(test.root)
endpoint = "/" + parent
} else {
endpoint = "/" + test.root + "/"
}
what := fmt.Sprintf("i=%d, root=%q, isFile=%v, noHead=%v", i, test.root, isFile, noHead)
assert.Equal(t, ts.URL+endpoint, f.String(), what)
}
}
}


@@ -9,15 +9,14 @@ package hubic
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"time"
swiftLib "github.com/ncw/swift/v2"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
@@ -56,11 +55,10 @@ func init() {
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})
@@ -122,7 +120,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
}
decoder := json.NewDecoder(resp.Body)
var result credentials
@@ -148,7 +146,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Hubic")
return nil, fmt.Errorf("failed to configure Hubic: %w", err)
}
f := &Fs{
@@ -165,7 +163,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = c.Authenticate(ctx)
if err != nil {
return nil, errors.Wrap(err, "error authenticating swift connection")
return nil, fmt.Errorf("error authenticating swift connection: %w", err)
}
// Parse config into swift.Options struct


@@ -2,10 +2,9 @@ package api
import (
"encoding/xml"
"errors"
"fmt"
"time"
"github.com/pkg/errors"
)
const (
@@ -368,6 +367,7 @@ type JottaFile struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
PublicURI string `xml:"publicURI"`
PublicSharePath string `xml:"publicSharePath"`
State string `xml:"currentRevision>state"`
CreatedAt Time `xml:"currentRevision>created"`

File diff suppressed because it is too large


@@ -28,33 +28,57 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr",
Description: "Koofr, Digi Storage and other Koofr-compatible storage providers",
NewFs: NewFs,
Options: []fs.Option{{
Name: fs.ConfigProvider,
Help: "Choose your storage provider.",
// NOTE if you add a new provider here, then add it in the
// setProviderDefaults() function and update options accordingly
Examples: []fs.OptionExample{{
Value: "koofr",
Help: "Koofr, https://app.koofr.net/",
}, {
Value: "digistorage",
Help: "Digi Storage, https://storage.rcs-rds.ro/",
}, {
Value: "other",
Help: "Any other Koofr API compatible storage service",
}},
}, {
Name: "endpoint",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Help: "The Koofr API endpoint to use.",
Provider: "other",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Help: "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
Help: "Your user name.",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
Provider: "koofr",
IsPassword: true,
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password).",
Provider: "digistorage",
IsPassword: true,
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at your service's settings page).",
Provider: "other",
IsPassword: true,
Required: true,
}, {
@@ -71,6 +95,7 @@ func init() {
// Options represent the configuration of the Koofr backend
type Options struct {
Provider string `config:"provider"`
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
@@ -255,13 +280,38 @@ func (f *Fs) fullPath(part string) string {
return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
}
// NewFs constructs a new filesystem given a root path and configuration options
func setProviderDefaults(opt *Options) {
// handle old, provider-less configs
if opt.Provider == "" {
if opt.Endpoint == "" || strings.HasPrefix(opt.Endpoint, "https://app.koofr.net") {
opt.Provider = "koofr"
} else if strings.HasPrefix(opt.Endpoint, "https://storage.rcs-rds.ro") {
opt.Provider = "digistorage"
} else {
opt.Provider = "other"
}
}
// now assign an endpoint
if opt.Provider == "koofr" {
opt.Endpoint = "https://app.koofr.net"
} else if opt.Provider == "digistorage" {
opt.Endpoint = "https://storage.rcs-rds.ro"
}
}
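A minimal usage sketch of the legacy-config mapping implemented above; the endpoint value comes from the code, the rest is illustrative:

// An old, provider-less config that pointed at the Digi Storage endpoint.
opt := &Options{Endpoint: "https://storage.rcs-rds.ro"}
setProviderDefaults(opt)
// opt.Provider is now "digistorage" and opt.Endpoint stays
// "https://storage.rcs-rds.ro"; an empty endpoint would map to "koofr"
// and get "https://app.koofr.net" assigned.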
// NewFs constructs a new filesystem given a root path and rclone configuration options
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
setProviderDefaults(opt)
return NewFsFromOptions(ctx, name, root, opt)
}
// NewFsFromOptions constructs a new filesystem given a root path and internal configuration options
func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff fs.Fs, err error) {
pass, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, err
@@ -344,7 +394,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err e
return nil, translateErrorsObject(err)
}
if info.Type == "dir" {
return nil, fs.ErrorNotAFile
return nil, fs.ErrorIsDir
}
return &Object{
fs: f,
@@ -534,7 +584,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return nil
}
// About reports space usage (with a MB precision)
// About reports space usage (with a MiB precision)
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
mount, err := f.client.MountsDetails(f.mountID)
if err != nil {
@@ -608,5 +658,25 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
if err != nil {
return "", translateErrorsDir(err)
}
return linkData.ShortURL, nil
// URL returned by API looks like following:
//
// https://app.koofr.net/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6
//
// Direct url looks like following:
//
// https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
//
// I am not sure about the meaning of the "path" parameter; in my experiments
// it is always "%2F", and omitting it or putting any other value
// results in 404.
//
// There is one more quirk: a direct link to a file in / returns that file,
// while a direct link to a file elsewhere in the hierarchy returns a zip
// archive with one member.
link := linkData.URL
link = strings.ReplaceAll(link, "/links", "/content/links")
link += "/files/get?path=%2F"
return link, nil
}
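A minimal illustration of the link rewrite described in the comments above, using the example URLs from those comments:

link := "https://app.koofr.net/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6"
link = strings.ReplaceAll(link, "/links", "/content/links")
link += "/files/get?path=%2F"
// link is now:
// https://app.koofr.net/content/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6/files/get?path=%2F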


@@ -1,13 +1,14 @@
//go:build darwin || dragonfly || freebsd || linux
// +build darwin dragonfly freebsd linux
package local
import (
"context"
"fmt"
"os"
"syscall"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -19,7 +20,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if os.IsNotExist(err) {
return nil, fs.ErrorDirNotFound
}
return nil, errors.Wrap(err, "failed to read disk usage")
return nil, fmt.Errorf("failed to read disk usage: %w", err)
}
bs := int64(s.Bsize) // nolint: unconvert
usage := &fs.Usage{


@@ -1,13 +1,14 @@
//go:build windows
// +build windows
package local
import (
"context"
"fmt"
"syscall"
"unsafe"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -23,7 +24,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
)
if e1 != syscall.Errno(0) {
return nil, errors.Wrap(e1, "failed to read disk usage")
return nil, fmt.Errorf("failed to read disk usage: %w", e1)
}
usage := &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used


@@ -1,11 +0,0 @@
//+build darwin
package local
import "github.com/rclone/rclone/lib/encoder"
// This is the encoding used by the local backend for macOS
//
// macOS can't store invalid UTF-8, it converts them into %XX encoding
const defaultEnc = (encoder.Base |
encoder.EncodeInvalidUtf8)


@@ -1,8 +0,0 @@
//+build !windows,!darwin
package local
import "github.com/rclone/rclone/lib/encoder"
// This is the encoding used by the local backend for non windows platforms
const defaultEnc = encoder.Base


@@ -1,4 +1,5 @@
//+build !linux
//go:build !linux
// +build !linux
package local


@@ -1,4 +1,5 @@
//+build linux
//go:build linux
// +build linux
package local


@@ -1,3 +1,4 @@
//go:build windows || plan9 || js
// +build windows plan9 js
package local


@@ -1,3 +1,4 @@
//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js
package local


@@ -4,6 +4,7 @@ package local
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -16,7 +17,6 @@ import (
"time"
"unicode/utf8"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -27,6 +27,7 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/text/unicode/norm"
)
// Constants
@@ -43,11 +44,11 @@ func init() {
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Help: "Disable UNC (long path names) conversion on Windows.",
Advanced: runtime.GOOS != "windows",
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names",
Help: "Disables long file names.",
}},
}, {
Name: "copy_links",
@@ -58,7 +59,7 @@ func init() {
Advanced: true,
}, {
Name: "links",
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
Default: false,
NoPrefix: true,
ShortOpt: "l",
@@ -66,6 +67,7 @@ func init() {
}, {
Name: "skip_links",
Help: `Don't warn about skipped symlinks.
This flag disables warning messages on skipped symlinks or junction
points, as you explicitly acknowledge that they should be skipped.`,
Default: false,
@@ -73,30 +75,39 @@ points, as you explicitly acknowledge that they should be skipped.`,
Advanced: true,
}, {
Name: "zero_size_links",
Help: `Assume the Stat size of links is zero (and read them instead)
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
On some virtual filesystems (such as LucidLink), reading a link size via a Stat call always returns 0.
However, on unix it reads as the length of the text in the link. This may cause errors like this when
syncing:
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:
Failed to copy: corrupted on transfer: sizes differ 0 vs 13
- Windows
- On some virtual filesystems (such as LucidLink)
- Android
Setting this flag causes rclone to read the link and use that as the size of the link
instead of 0 which in most cases fixes the problem.`,
So rclone now always reads the link.
`,
Default: false,
Advanced: true,
}, {
Name: "no_unicode_normalization",
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
Name: "unicode_normalization",
Help: `Apply unicode NFC normalization to paths and filenames.
This flag is deprecated now. Rclone no longer normalizes unicode file
names, but it compares them with unicode normalization in the sync
routine instead.`,
This flag can be used to normalize file names into unicode NFC form
that are read from the local filesystem.
Rclone does not normally touch the encoding of file names it reads from
the file system.
This can be useful when using macOS as it normally provides decomposed (NFD)
unicode which in some languages (e.g. Korean) doesn't display properly on
some OSes.
Note that rclone compares filenames with unicode normalization in the sync
routine so this flag shouldn't normally be used.`,
Default: false,
Advanced: true,
}, {
Name: "no_check_updated",
Help: `Don't check to see if the files change during upload
Help: `Don't check to see if the files change during upload.
Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
@@ -142,7 +153,7 @@ to override the default choice.`,
Advanced: true,
}, {
Name: "case_insensitive",
Help: `Force the filesystem to report itself as case insensitive
Help: `Force the filesystem to report itself as case insensitive.
Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
@@ -151,7 +162,7 @@ to override the default choice.`,
Advanced: true,
}, {
Name: "no_preallocate",
Help: `Disable preallocation of disk space for transferred files
Help: `Disable preallocation of disk space for transferred files.
Preallocation of disk space helps prevent filesystem fragmentation.
However, some virtual filesystem layers (such as Google Drive File
@@ -162,7 +173,7 @@ Use this flag to disable preallocation.`,
Advanced: true,
}, {
Name: "no_sparse",
Help: `Disable sparse files for multi-thread downloads
Help: `Disable sparse files for multi-thread downloads.
On Windows platforms rclone will make sparse files when doing
multi-thread downloads. This avoids long pauses on large files where
@@ -172,7 +183,7 @@ cause disk fragmentation and can be slow to work with.`,
Advanced: true,
}, {
Name: "no_set_modtime",
Help: `Disable setting modtime
Help: `Disable setting modtime.
Normally rclone updates modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when
@@ -185,7 +196,7 @@ enabled, rclone will no longer update the modtime after copying a file.`,
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: defaultEnc,
Default: encoder.OS,
}},
}
fs.Register(fsi)
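A minimal sketch of the NFC normalization enabled by the unicode_normalization option above, using the norm package imported in this change; the example string is illustrative:

// "café" as macOS typically provides it: decomposed (NFD), with a
// combining acute accent.
nfd := "cafe\u0301"
nfc := norm.NFC.String(nfd)
// nfc is the precomposed (NFC) form "café". When the flag is set, the
// same conversion is applied to file names read from the local filesystem.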
@@ -196,8 +207,7 @@ type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
ZeroSizeLinks bool `config:"zero_size_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
UTFNorm bool `config:"unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
@@ -256,10 +266,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, errLinksAndCopyLinks
}
if opt.NoUTFNorm {
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
}
f := &Fs{
name: name,
opt: *opt,
@@ -396,7 +402,7 @@ func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, erro
}
if o.mode.IsDir() {
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
return nil, fs.ErrorIsDir
}
return o, nil
}
@@ -426,7 +432,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fd, err := os.Open(fsDirPath)
if err != nil {
isPerm := os.IsPermission(err)
err = errors.Wrapf(err, "failed to open directory %q", dir)
err = fmt.Errorf("failed to open directory %q: %w", dir, err)
fs.Errorf(dir, "%v", err)
if isPerm {
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
@@ -437,7 +443,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
defer func() {
cerr := fd.Close()
if cerr != nil && err == nil {
err = errors.Wrapf(cerr, "failed to close directory %q:", dir)
err = fmt.Errorf("failed to close directory %q:: %w", dir, cerr)
}
}()
@@ -462,8 +468,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for _, name := range names {
namepath := filepath.Join(fsDirPath, name)
fi, fierr := os.Lstat(namepath)
if os.IsNotExist(fierr) {
// skip entry removed by a concurrent goroutine
continue
}
if fierr != nil {
err = errors.Wrapf(err, "failed to read directory %q", namepath)
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
@@ -473,7 +483,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
}
if err != nil {
return nil, errors.Wrap(err, "failed to read directory entry")
return nil, fmt.Errorf("failed to read directory entry: %w", err)
}
for _, fi := range fis {
@@ -486,7 +496,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fi, err = os.Stat(localPath)
if os.IsNotExist(err) || isCircularSymlinkError(err) {
// Skip bad symlinks and circular symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
fs.Errorf(newRemote, "Listing error: %v", err)
err = accounting.Stats(ctx).Error(err)
continue
@@ -522,6 +532,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
if f.opt.UTFNorm {
filename = norm.NFC.String(filename)
}
remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))
if !utf8.ValidString(filename) {
@@ -557,9 +570,8 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
localPath := f.localPath(dir)
err := os.MkdirAll(localPath, 0777)
err := file.MkdirAll(localPath, 0777)
if err != nil {
return err
}
@@ -660,7 +672,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return err
}
if !fi.Mode().IsDir() {
return errors.Errorf("can't purge non directory: %q", dir)
return fmt.Errorf("can't purge non directory: %q", dir)
}
return os.RemoveAll(dir)
}
@@ -753,7 +765,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Create parent of destination
dstParentPath := filepath.Dir(dstPath)
err = os.MkdirAll(dstParentPath, 0777)
err = file.MkdirAll(dstParentPath, 0777)
if err != nil {
return err
}
@@ -854,12 +866,12 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
err := o.lstat()
var changed bool
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
if errors.Is(err, os.ErrNotExist) {
// If file not found then we assume any accumulated
// hashes are OK - this will error on Open
changed = true
} else {
return "", errors.Wrap(err, "hash: failed to stat")
return "", fmt.Errorf("hash: failed to stat: %w", err)
}
} else {
o.fs.objectMetaMu.RLock()
@@ -888,16 +900,16 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
in = readers.NewLimitedReadCloser(in, o.size)
}
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
return "", fmt.Errorf("hash: failed to open: %w", err)
}
var hashes map[hash.Type]string
hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
closeErr := in.Close()
if err != nil {
return "", errors.Wrap(err, "hash: failed to read")
return "", fmt.Errorf("hash: failed to read: %w", err)
}
if closeErr != nil {
return "", errors.Wrap(closeErr, "hash: failed to close")
return "", fmt.Errorf("hash: failed to close: %w", closeErr)
}
hashValue = hashes[r]
o.fs.objectMetaMu.Lock()
@@ -978,17 +990,17 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
// Check if file has the same size and modTime
fi, err := file.fd.Stat()
if err != nil {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
return 0, fmt.Errorf("can't read status of source file while transferring: %w", err)
}
file.o.fs.objectMetaMu.RLock()
oldtime := file.o.modTime
oldsize := file.o.size
file.o.fs.objectMetaMu.RUnlock()
if oldsize != fi.Size() {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
}
if !oldtime.Equal(fi.ModTime()) {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
}
}
@@ -1087,7 +1099,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
dir := filepath.Dir(o.path)
return os.MkdirAll(dir, 0777)
return file.MkdirAll(dir, 0777)
}
type nopWriterCloser struct {
@@ -1121,6 +1133,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// Wipe hashes before update
o.clearHashCache()
var symlinkData bytes.Buffer
// If the object is a regular file, create it.
// If it is a translated link, just read in the contents, and
@@ -1267,9 +1282,13 @@ func (o *Object) setMetadata(info os.FileInfo) {
o.modTime = info.ModTime()
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// On Windows links read as 0 size so set the correct size here
// Optionally, users can turn this feature on with the zero_size_links flag
if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
// Read the size of the link.
//
// The value in info.Size() is not always correct
// - Windows links read as 0 size
// - Some virtual filesystems (such as LucidLink) links read as 0 size
// - Android - on some versions the links are larger than readlink suggests
if o.translatedLink {
linkdst, err := os.Readlink(o.path)
if err != nil {
fs.Errorf(o, "Failed to read link size: %v", err)
@@ -1279,6 +1298,13 @@ func (o *Object) setMetadata(info os.FileInfo) {
}
}
// clearHashCache wipes any cached hashes for the object
func (o *Object) clearHashCache() {
o.fs.objectMetaMu.Lock()
o.hashes = nil
o.fs.objectMetaMu.Unlock()
}
// Stat an Object into info
func (o *Object) lstat() error {
info, err := o.fs.lstat(o.path)
@@ -1290,6 +1316,7 @@ func (o *Object) lstat() error {
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
o.clearHashCache()
return remove(o.path)
}


@@ -1,6 +1,7 @@
package local
import (
"bytes"
"context"
"io/ioutil"
"os"
@@ -12,6 +13,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
@@ -93,16 +95,16 @@ func TestSymlink(t *testing.T) {
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
// Check with no symlink flags
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t)
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
fstest.CheckItems(t, r.Flocal, file1, file2d)
fstest.CheckItems(t, r.Fremote)
r.CheckLocalItems(t, file1, file2d)
r.CheckRemoteItems(t)
// Set fs into "-l" mode
f.opt.FollowSymlinks = false
@@ -111,7 +113,7 @@ func TestSymlink(t *testing.T) {
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2)
r.CheckLocalItems(t, file1, file2)
}
// Create a symlink
@@ -119,7 +121,7 @@ func TestSymlink(t *testing.T) {
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
r.CheckLocalItems(t, file1, file2, file3)
}
// Check it got the correct contents
@@ -166,3 +168,64 @@ func TestSymlinkError(t *testing.T) {
_, err := NewFs(context.Background(), "local", "/", m)
assert.Equal(t, errLinksAndCopyLinks, err)
}
// Test hashes on updating an object
func TestHashOnUpdate(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
f := r.Flocal.(*Fs)
// Get the object
o, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
// Test the hash is as we expect
md5, err := o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
// Reupload it with different contents but the same size and timestamp
var b = bytes.NewBufferString("CONTENT")
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
err = o.Update(ctx, b, src)
require.NoError(t, err)
// Check the hash is as expected
md5, err = o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "45685e95985e20822fb2538a522a5ccf", md5)
}
// Test hashes on deleting an object
func TestHashOnDelete(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
f := r.Flocal.(*Fs)
// Get the object
o, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
// Test the hash is as we expect
md5, err := o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
// Delete the object
require.NoError(t, o.Remove(ctx))
// Test the hash cache is empty
require.Nil(t, o.(*Object).hashes)
// Test the hash returns an error
_, err = o.Hash(ctx, hash.MD5)
require.Error(t, err)
}


@@ -1,5 +1,6 @@
// Device reading functions
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package local


@@ -1,5 +1,6 @@
// Device reading functions
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package local


@@ -1,4 +1,5 @@
//+build !windows
//go:build !windows
// +build !windows
package local


@@ -1,4 +1,5 @@
//+build windows
//go:build windows
// +build windows
package local


@@ -1,3 +1,4 @@
//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js
package local

Some files were not shown because too many files have changed in this diff