mirror of https://github.com/rclone/rclone.git synced 2026-01-21 11:53:17 +00:00

Compare commits


250 Commits

Author SHA1 Message Date
Nick Craig-Wood
b0c54538b0 drive: make backend config -o config add a combined AllDrives remote
This adjusts

    rclone backend drives -o config drive:

so that it also emits a config section called `AllDrives` which uses
the combine backend to make a backend which combines all the shared
drives into one.

It also makes sure that all the shared drive names are valid rclone
config names, deduplicating if necessary.

Fixes #4506
2022-04-21 09:58:59 +01:00
Nick Craig-Wood
e62b9d017d Add combine backend to combine multiple remotes in one directory tree - FIXME WIP
Needs
- docs
- integration tests

Fixes #5600
2022-04-21 09:12:50 +01:00
Nick Craig-Wood
632f626f08 fstests: check for wrapped errors in ListR test 2022-04-20 17:56:15 +01:00
Nick Craig-Wood
bab91e4402 putio: ignore URL encoded files as these fail in the integration tests 2022-04-15 17:57:15 +01:00
Nick Craig-Wood
fde40319ef koofr: remove digistorage from integration tests as no account 2022-04-15 17:57:15 +01:00
Nick Craig-Wood
94e330d4fa onedrive: remove onedrive China from integration tests as we no longer have an account 2022-04-15 17:57:15 +01:00
Nick Craig-Wood
087543d723 sftp: ignore failing entries in rsync.net integration tests 2022-04-15 17:57:15 +01:00
Nick Craig-Wood
6a759d936a storj: fix bucket creation on Move picked up by integration tests 2022-04-15 17:57:15 +01:00
Nick Craig-Wood
7c31240bb8 Add Nick Gooding to contributors 2022-04-15 17:57:15 +01:00
Nick Gooding
25146b4306 googlecloudstorage: add --gcs-no-check-bucket to minimise transactions and perms
Adds a configuration option to the GCS backend to allow skipping the
check if a bucket exists before copying an object to it, much like
f406dbb added for S3.
2022-04-14 11:18:36 +01:00
Nick Craig-Wood
240561850b test makefiles: add --chargen flag to make ascii chargen files 2022-04-13 23:07:56 +01:00
Nil Alexandrov
39a1e37441 netstorage: add support contacts to netstorage doc 2022-04-13 23:07:21 +01:00
Nick Craig-Wood
4c02f50ef5 build: update github.com/billziss-gh to github.com/winfsp 2022-04-13 10:18:26 +01:00
Nick Craig-Wood
f583b86334 test makefiles: fix crash if --min-file-size <= --max-file-size 2022-04-12 13:45:20 +01:00
Nick Craig-Wood
118e8e1470 test makefiles: add --sparse, --zero, --pattern and --ascii flags 2022-04-12 13:45:20 +01:00
Nick Craig-Wood
afcea9c72b test makefile: implement new test command to write a single file 2022-04-12 12:57:16 +01:00
Nick Craig-Wood
27176cc6bb config: use os.UserCacheDir from go stdlib to find cache dir #6095
When this code was originally implemented os.UserCacheDir wasn't
public so this used a copy of the code. This commit replaces that now
out of date copy with a call to the now public stdlib function.
2022-04-11 11:44:15 +01:00
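A minimal sketch of the stdlib call this commit switches to; the fallback and the "rclone" subdirectory shown here are illustrative assumptions, not the actual config code:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// os.UserCacheDir (public since go1.11) returns $XDG_CACHE_HOME or
	// ~/.cache on Linux, %LocalAppData% on Windows, ~/Library/Caches on macOS.
	dir, err := os.UserCacheDir()
	if err != nil {
		dir = os.TempDir() // illustrative fallback, not rclone's actual one
	}
	fmt.Println(filepath.Join(dir, "rclone"))
}
```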
Nick Craig-Wood
f1e4b7da7b Add Adrien Rey-Jarthon to contributors 2022-04-11 11:44:15 +01:00
albertony
f065a267f6 docs: fix some links to command pages 2022-04-07 15:50:41 +02:00
Adrien Rey-Jarthon
17f8014909 docs: Note that Scaleway C14 is deprecating SFTP in favor of S3
This updates the documentation to reflect the new C14 Cold Storage API
works with S3 and not with SFTP any more.

See: https://github.com/rclone/rclone/issues/1080#issuecomment-1082088870
2022-04-05 11:11:52 +01:00
Nick Craig-Wood
8ba04562c3 build: update android go build to 1.18.x and NDK to 23.1.7779620 2022-04-04 20:35:17 +01:00
Nick Craig-Wood
285747b1d1 build: update to go1.18 and make go1.16 the minimum required version 2022-04-04 20:35:17 +01:00
Nick Craig-Wood
7bb8b8f4ba cache: fix bug after golang.org/x/time/rate update
Before this change the cache backend was passing -1 into
rate.NewLimiter to mean unlimited transactions per second.

In a recent update to the library this immediately returns a rate
limit error, as might be expected.

This patch uses rate.Inf as indicated by the docs to signal no limits
are required.
2022-04-04 20:35:17 +01:00
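A minimal sketch of the fix as described; newLimiter is a hypothetical helper showing the idea that "no limit" must now be expressed as rate.Inf rather than -1:

```go
package main

import "golang.org/x/time/rate"

// newLimiter is a hypothetical helper: tps <= 0 means "unlimited
// transactions per second", which needs rate.Inf, not a negative limit.
func newLimiter(tps float64) *rate.Limiter {
	if tps <= 0 {
		return rate.NewLimiter(rate.Inf, 0) // unlimited, as the package docs indicate
	}
	return rate.NewLimiter(rate.Limit(tps), 1)
}
```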
Nick Craig-Wood
59c242bbf6 build: update dependencies
Also:

- dropbox: fix compile after API change in upstream library
2022-04-04 20:35:17 +01:00
Nick Craig-Wood
a2bacd7d3f Add rafma0 to contributors 2022-04-04 20:35:17 +01:00
Nick Craig-Wood
9babcc4811 Add GH to contributors 2022-04-04 20:35:17 +01:00
Nick Craig-Wood
a0f665ec3c Add KARBOWSKI Piotr to contributors 2022-04-04 20:35:17 +01:00
Nick Craig-Wood
ecdf42c17f Add Tobias Klauser to contributors 2022-04-04 20:35:17 +01:00
rafma0
be9ee1d138 putio: fix multithread download and other ranged requests
Before this change the 206 responses from putio Range requests were being
returned as errors.

This change now accepts both 200 and 206 in the GET response.
2022-04-04 11:15:55 +01:00
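The check described amounts to treating 206 Partial Content as success alongside 200; a sketch with a hypothetical helper:

```go
package main

import (
	"fmt"
	"net/http"
)

// checkGet is a hypothetical helper: ranged GETs legitimately return
// 206 Partial Content, so both 200 and 206 count as success.
func checkGet(resp *http.Response) error {
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}

func main() {
	fmt.Println(checkGet(&http.Response{StatusCode: 206, Status: "206 Partial Content"}))
}
```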
GH
9e9ead2ac4 onedrive: note that sharepoint also changes web files (.html, .aspx) 2022-04-03 12:43:23 +01:00
KARBOWSKI Piotr
4f78226f8b sftp: Fix OpenSSH 8.8+ RSA keys incompatibility (#6076)
Updates golang.org/x/crypto to v0.0.0-20220331220935-ae2d96664a29.

Fixes the issues with connecting to OpenSSH 8.8+ remotes in case the
client uses RSA key pair due to OpenSSH dropping support for SHA1 based
ssh-rsa signature.

Bug: https://github.com/rclone/rclone/issues/6076
Bug: https://github.com/golang/go/issues/37278
Signed-off-by: KARBOWSKI Piotr <piotr.karbowski@gmail.com>
2022-04-01 12:49:39 +01:00
Tobias Klauser
54c9c3156c fs/config, lib/terminal: use golang.org/x/term
golang.org/x/crypto/ssh/terminal is deprecated in favor of
golang.org/x/term, see https://pkg.go.dev/golang.org/x/crypto/ssh/terminal

The latter also supports ReadPassword on solaris, so enable the
respective functionality in fs/config for solaris as well.
2022-04-01 12:48:18 +01:00
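The replacement is mechanical since the new package keeps the same signatures; a minimal example of the golang.org/x/term call:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	fmt.Print("password: ")
	// term.ReadPassword replaces terminal.ReadPassword and also works
	// on solaris, which is why fs/config can enable it there.
	pw, err := term.ReadPassword(int(os.Stdin.Fd()))
	fmt.Println()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("read %d bytes\n", len(pw))
}
```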
Nick Craig-Wood
6ecbbf796e netstorage: make levels of headings consistent 2022-03-31 18:11:37 +01:00
Nick Craig-Wood
603e51c43f s3: sync providers in config description with providers 2022-03-31 17:55:54 +01:00
Nick Craig-Wood
ca4671126e Add Berkan Teber to contributors 2022-03-31 17:55:54 +01:00
Berkan Teber
6ea26b508a putio: handle rate limit errors
For rate limit errors, "x-ratelimit-reset" header is now respected.
2022-03-30 12:25:53 +01:00
Nick Craig-Wood
887cccb2c1 filter: fix timezone of --min-age/--max-age from UTC to local as documented
Before this change if the timezone was omitted in a
--min-age/--max-age time specifier then rclone defaulted to a UTC
timezone.

This is documented as using the local timezone if the time zone
specifier is omitted which is a much more useful default and this
patch corrects the implementation to agree with the documentation.

See: https://forum.rclone.org/t/problem-utc-windows-europe-1-summer-problem/29917
2022-03-28 11:47:27 +01:00
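The underlying distinction is the stdlib one between time.Parse (UTC for zoneless strings) and time.ParseInLocation; rclone's actual age parser is more involved, so this is just an illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05"
	// time.Parse interprets a string without a zone as UTC...
	utc, _ := time.Parse(layout, "2022-03-28 12:00:00")
	// ...while ParseInLocation uses the given zone, matching what the
	// --min-age/--max-age documentation promises for zoneless times.
	local, _ := time.ParseInLocation(layout, "2022-03-28 12:00:00", time.Local)
	fmt.Println(utc.Unix() == local.Unix()) // false unless the local zone is UTC
}
```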
Nick Craig-Wood
d975196cfa dropbox: fix retries of multipart uploads with incorrect_offset error
Before this fix, rclone retried chunks of multipart uploads. However,
if a chunk had been partially received, dropbox would reply with an
incorrect_offset error which rclone was ignoring.

This patch parses the new offset from the error response and uses it
to adjust the data that rclone sends so it is the same as what dropbox
is expecting.

See: https://forum.rclone.org/t/dropbox-rate-limiting-for-upload/29779
2022-03-25 15:39:01 +00:00
Nick Craig-Wood
1f39b28f49 googlecloudstorage: use the s3 pacer to speed up transactions
This commit switches Google Cloud Storage from the drive pacer to the
s3 pacer. The main difference between them is that the s3 pacer does
not limit transactions in the non-error case. This is appropriate for
a cloud storage backend where you pay for each transaction.
2022-03-25 15:28:59 +00:00
Nick Craig-Wood
2738db22fb pacer: default the Google pacer to a burst of 100 to fix gcs pacing
Before this change the pacer defaulted to a burst of 1 which meant that
it kept being activated unnecessarily.

This affected Google Cloud Storage and Google Photos.

See: https://forum.rclone.org/t/no-traverse-too-slow-with-lot-of-files/29886/12
2022-03-25 15:28:59 +00:00
Nick Craig-Wood
1978ddde73 Add GuoXingbin to contributors 2022-03-25 15:28:59 +00:00
GuoXingbin
c2bfda22ab s3: Add ChinaMobile EOS to provider list
China Mobile Ecloud Elastic Object Storage (EOS) is a cloud object storage service, and is fully compatible with S3.

Fixes #6054
2022-03-24 11:57:00 +00:00
Nick Craig-Wood
d4da9b98d6 vfs: add --vfs-fast-fingerprint for less accurate but faster fingerprints 2022-03-22 16:33:24 +00:00
Nick Craig-Wood
e4f5912294 azureblob: fix lint error with golangci-lint 1.45.0 2022-03-22 16:33:24 +00:00
Nick Craig-Wood
750fffdf71 netstorage: fix unescaped HTML in documentation 2022-03-18 14:40:12 +00:00
Nick Craig-Wood
388e74af52 Start v1.59.0-DEV development 2022-03-18 14:04:22 +00:00
Nick Craig-Wood
f9354fff2f Version v1.58.0 2022-03-18 12:29:54 +00:00
Nick Craig-Wood
ff1f173fc2 build: add bisync.md to docs builder and fix missing tardigrade.md stub 2022-03-18 11:22:23 +00:00
Nick Craig-Wood
f8073a7b63 build: ensure the Go version used for the build is always up to date #6020 2022-03-17 17:14:50 +00:00
Nick Craig-Wood
807f1cedaa hasher: fix crash on object not found
Before this fix `NewObject` could return a wrapped `fs.Object(nil)`
which caused a crash. This was caused by `wrapObject` returning a
`nil` `*Object` which was cast into an `fs.Object`.

This changes the interface of `wrapObject` so it returns an
`fs.Object` instead of a `*Object` and an error which must be checked.
This forces the callers to return a `nil` object rather than an
`fs.Object(nil)`.

See: https://forum.rclone.org/t/panic-in-hasher-when-mounting-with-vfs-cache-and-not-synced-data-in-the-cache/29697/11
2022-03-16 11:30:26 +00:00
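The underlying gotcha is Go's typed-nil interface; a stripped-down illustration (Object, Namer and wrap are stand-ins, not the hasher's real types):

```go
package main

import "fmt"

type Object struct{ name string }

func (o *Object) Name() string { return o.name } // panics if o is nil

type Namer interface{ Name() string }

// wrap mirrors the old wrapObject shape: it returns a typed *Object.
func wrap() *Object { return nil }

func main() {
	var n Namer = wrap()
	// The interface now holds a non-nil type with a nil pointer inside,
	// so the nil check callers relied on silently passes.
	fmt.Println(n == nil) // false
}
```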
Nick Craig-Wood
bf9c68c88a storj: implement server side Move 2022-03-14 15:44:56 +00:00
Nick Craig-Wood
189cba0fbe s3: add other regions for Lyve and correct Provider name 2022-03-14 15:43:35 +00:00
Nick Craig-Wood
69f726f16c Add Nil Alexandrov to contributors 2022-03-14 15:43:35 +00:00
Nil Alexandrov
65652f7a75 Add Akamai Netstorage as a new backend. 2022-03-09 12:42:22 +00:00
Nil Alexandrov
47f9ab2f56 lib/rest: add support for setting trailers 2022-03-09 12:42:22 +00:00
Nick Craig-Wood
5dd51e6149 union: fix deadlock when one part of a multi-upload fails
Before this fix, rclone would deadlock when uploading two files at
once, if one errored. This caused the other file to block in the multi
reader and never complete.

This fix drains the input buffer on error which allows the other
upload to complete.

See: https://forum.rclone.org/t/union-with-create-policy-all-copy-stuck-when-first-union-fails/29601
2022-03-09 11:30:55 +00:00
Nick Craig-Wood
6a6d254a9f s3: add support for Seagate Lyve Cloud storage 2022-03-09 11:30:55 +00:00
jaKa
fd453f2c7b koofr: renamed digistorage to exclude the romania part. 2022-03-08 22:39:23 +00:00
jaKa
5d06a82c5d koofr: add digistorage service as a koofr provider. 2022-03-08 10:36:18 +00:00
Nick Craig-Wood
847868b4ba ftp: hard fork github.com/jlaffaye/ftp to fix go get
Having a replace directive in go.mod causes "go get
github.com/rclone/rclone" to fail as is discussed in this Go issue:
https://github.com/golang/go/issues/44840

This is apparently how the Go team want go.mod to work, so this commit
hard forks github.com/jlaffaye/ftp into github.com/rclone/ftp so we
can remove the `replace` directive from the go.mod file.

Fixes #5810
2022-03-07 09:55:49 +00:00
Ivan Andreev
38ca178cf3 mailru: fix int32 overflow on arm32 - fixes #6003 2022-03-06 13:33:57 +00:00
Nick Craig-Wood
9427d22f99 Add ctrl-q to contributors 2022-03-06 13:33:26 +00:00
ctrl-q
7b1428a498 onedrive: Do not retry on 400 pathIsTooLong 2022-03-06 13:05:05 +00:00
Nick Craig-Wood
ec72432cec vfs: fix failed to _ensure cache internal error: downloaders is nil error
This error was caused by renaming an open file.

When the file was renamed in the cache, the downloaders were cleared,
however the downloaders were not re-opened when needed again, instead
this error was generated.

This fix re-opens the downloaders if they have been closed by renaming
the file.

Fixes #5984
2022-03-03 17:43:29 +00:00
Nick Craig-Wood
2339172df2 pcloud: fix pre-1970 time stamps - fixes #5917
Before this change rclone sent pre-1970 timestamps as negative
numbers. pCloud ignores these and sets them to today's date.

This change sends the timestamps as unsigned 64 bit integers (which is
how the binary protocol sends them) and pCloud accepts the (actually
negative) timestamp like this.
2022-03-03 17:18:40 +00:00
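A small demonstration of the signed-to-unsigned reinterpretation described (the wire-format details are pCloud's; the cast is plain Go):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(1965, 1, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(t.Unix()) // -157766400, which pCloud ignored
	// Reinterpreting the same 64 bits as unsigned matches how the
	// binary protocol carries the value, and pCloud accepts it.
	fmt.Println(uint64(t.Unix())) // 18446744073551785216
}
```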
Nick Craig-Wood
268b808bf8 filter: add {{ regexp }} syntax to pattern matches - fixes #4074
There has been a desire from more advanced rclone users to have regexp
filtering as well as the glob filtering.

This patch adds regexp filtering using this syntax `{{ regexp }}`
which is currently a syntax error, so is backwards compatible.

This means regexps can be used everywhere globs can be used, and that
they also can be mixed with globs in the same pattern, eg `*.{{jpe?g}}`
2022-03-03 17:16:28 +00:00
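A rough sketch of the dispatch this implies; toMatcher is hypothetical, and rclone's real filter code also handles globs mixed with {{ }} inside one pattern:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// toMatcher is a hypothetical sketch: a pattern wrapped in {{ }} is
// compiled directly as a regexp; anything else would take the usual
// glob-translation path (not shown).
func toMatcher(pattern string) (*regexp.Regexp, error) {
	if strings.HasPrefix(pattern, "{{") && strings.HasSuffix(pattern, "}}") {
		return regexp.Compile(pattern[2 : len(pattern)-2])
	}
	return nil, fmt.Errorf("glob translation not shown")
}

func main() {
	re, _ := toMatcher(`{{jpe?g$}}`)
	fmt.Println(re.MatchString("photo.jpg")) // true
}
```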
Nick Craig-Wood
74898bac3b build: add windows/arm64 build - NB this does not support mount yet #5828 2022-03-03 17:13:32 +00:00
Nick Craig-Wood
e0fbca02d4 compress: fix memory leak - fixes #6013
Before this change we forgot to close the compressor when checking to
see if an object was compressible.
2022-03-03 17:10:21 +00:00
Nick Craig-Wood
21355b4208 sync: Fix --max-duration so it doesn't retry when the duration is exceeded
Before this change, if the --max-duration limit was reached then
rclone would retry the sync as a fatal error wasn't raised.

This checks the deadline and raises a fatal error if necessary at the
end of the sync.

Fixes #6002
2022-03-03 17:08:16 +00:00
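A sketch of the shape of the check, assuming the deadline is carried in a context; rclone's actual fatal-error plumbing is not shown:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// finishSync is a hypothetical stand-in: at the end of the sync the
// deadline is inspected and converted into a non-retryable error so
// the retry loop stops instead of starting the sync again.
func finishSync(ctx context.Context) error {
	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
		return fmt.Errorf("max duration exceeded (fatal, no retry): %w", ctx.Err())
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	time.Sleep(5 * time.Millisecond)
	fmt.Println(finishSync(ctx))
}
```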
Nick Craig-Wood
251b84ff2c sftp: fix unnecessary seeking when uploading and downloading files
This stops the SFTP library issuing out of order writes which fixes
the problems uploading to `serve sftp` from the `sftp` backend.

This was fixed upstream in this pull request: https://github.com/pkg/sftp/pull/482

Fixes #5806
2022-03-03 17:02:35 +00:00
Nick Craig-Wood
537b62917f s3: add --s3-use-multipart-etag provider quirk #5993
Before this change the new multipart upload ETag checking code was
failing in the integration tests with Alibaba OSS.

Apparently Alibaba calculate the ETag in a different way to AWS.

This introduces a new provider quirk with a flag to disable the
checking of the ETag for multipart uploads.

Multipart ETag checking has been enabled for all providers that we can
test and that work, and left disabled for the others.
2022-03-01 16:36:39 +00:00
Nick Craig-Wood
71a784cfa2 compress: fix crash if metadata upload failed - fixes #5994
Before this change the backend attempted to delete a nil object if
the metadata upload failed.
2022-02-28 19:47:52 +00:00
Nick Craig-Wood
8ee0fe9863 serve docker: disable linux tests in CI as they are locking up regularly 2022-02-28 18:01:47 +00:00
Nick Craig-Wood
8f164e4df5 s3: Use the ETag on multipart transfers to verify the transfer was OK
Before this rclone ignored the ETag on multipart uploads which missed
an opportunity for a whole file integrity check.

This adds that check which means that we now check even harder that
multipart uploads have arrived properly.

See #5993
2022-02-25 16:19:03 +00:00
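For reference, the AWS multipart ETag scheme this verification relies on, as commonly documented: the MD5 of the concatenated binary part MD5s plus a part-count suffix. Providers that compute it differently need the quirk flag from #5993:

```go
package main

import (
	"crypto/md5"
	"fmt"
)

// multipartETag computes hex(md5(md5(part1) || md5(part2) || ...)) + "-N",
// the widely documented AWS behaviour this check assumes.
func multipartETag(parts [][]byte) string {
	var sums []byte
	for _, p := range parts {
		s := md5.Sum(p)
		sums = append(sums, s[:]...)
	}
	return fmt.Sprintf("%x-%d", md5.Sum(sums), len(parts))
}

func main() {
	fmt.Println(multipartETag([][]byte{[]byte("part one"), []byte("part two")}))
}
```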
Nick Craig-Wood
06ecc6511b drive: when using a link type --drive-export-formats show all doc types
Before this change we always hid unexportable document types (eg
Google maps).

After this change, if using --drive-export-formats
url/desktop/link.html/webloc we will show links for all documents
regardless of whether they are exportable, as the links to them work
either way.

See: https://forum.rclone.org/t/rclone-mount-for-google-drive-does-not-show-as-web-links-the-google-documents-of-the-google-my-map-gmap-type/29415
2022-02-25 16:08:11 +00:00
Nick Craig-Wood
3529bdec9b sftp: update docs on how to create known_hosts file
This also removes the note on the limitation that only one entry per
host is allowed in the file as it works with many entries provided
they have different key types.

See: https://forum.rclone.org/t/rclone-fails-ssh-handshakes-with-rsync-nets-sftp-when-a-known-hosts-file-is-specified/29206/
2022-02-25 16:08:11 +00:00
partev
486b43f8c7 doc: fix a typo
"and this it may require you to unblock it temporarily" -> "and it may require you to unblock it temporarily"
2022-02-22 21:05:05 +00:00
Nick Craig-Wood
89f0e4df80 swift: fix about so it shows info about the current container only
Before this change `rclone about swift:container` would show aggregate
info about all the containers, not just the one in use.

This causes a problem if container listing is disabled (for example in
the Blomp service).

This fix makes `rclone about swift:container` show only the info about
the given `container`. If aggregate info about all the containers is
required then use `rclone about swift:`.

See: https://forum.rclone.org/t/rclone-mount-blomp-problem/29151/18
2022-02-22 12:55:57 +00:00
Nick Craig-Wood
399fb5b7fb Add Vincent Murphy to contributors 2022-02-22 12:55:57 +00:00
Vincent Murphy
19f1ed949c docs: Fix broken test_proxy.py link 2022-02-22 12:26:17 +00:00
Nick Craig-Wood
d3a1001094 drive: add --drive-skip-dangling-shortcuts flag - fixes #5949
This flag enables dangling shortcuts to be skipped without an error.
2022-02-22 12:22:21 +00:00
Nick Craig-Wood
dc7e3ea1e3 drive,gcs,googlephotos: disable OAuth OOB flow (copy a token) due to google deprecation
Before this change, rclone supported authorizing for remote systems by
going to a URL and cutting and pasting a token from Google. This is
known as the OAuth out-of-band (oob) flow.

This, while very convenient for users, has been shown to be insecure
and has been deprecated by Google.

https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html#disallowed-oob

> OAuth out-of-band (OOB) is a legacy flow developed to support native
> clients which do not have a redirect URI like web apps to accept the
> credentials after a user approves an OAuth consent request. The OOB
> flow poses a remote phishing risk and clients must migrate to an
> alternative method to protect against this vulnerability. New
> clients will be unable to use this flow starting on Feb 28, 2022.

This change disables that flow, and forces the user to use the
redirect URL flow. (This is the flow used already for local configs.)

In practice this will mean that instead of cutting and pasting a token
for remote config, it will be necessary to run "rclone authorize"
instead. This is how all the other OAuth backends work so it is a well
tested code path.

Fixes #6000
2022-02-18 12:46:30 +00:00
Nick Craig-Wood
f22b703a51 storj: rename tardigrade backend to storj backend #5616
This adds an alias for backwards compatibility and leaves a stub
documentation page to redirect people to the new documentation.
2022-02-11 11:04:15 +00:00
Nick Craig-Wood
c40129d610 fs: allow backends to have aliases #5616
This allows a backend to have multiple aliases. These aliases are
hidden from `rclone config` and the command line flags are hidden from
the user. However the flags, environment variables and config for the
alias will work just fine.
2022-02-11 11:04:15 +00:00
Nick Craig-Wood
8dc93f1792 Add Márton Elek to contributors 2022-02-11 11:04:03 +00:00
Nick Craig-Wood
f4c40bf79d mount: add --devname to set the device name sent to FUSE for mount display
Before this change, the device name was always the remote:path rclone
was configured with. However this can contain sensitive information
and it appears in the `mount` output, so `--devname` allows the user
to configure it.

See: https://forum.rclone.org/t/rclone-mount-blomp-problem/29151/11
2022-02-09 11:56:43 +00:00
Nick Craig-Wood
9cc50a614b s3: add note about Storj provider bug and workaround
See: https://github.com/storj/gateway-mt/issues/39
2022-02-08 11:40:29 +00:00
Elek, Márton
bcb07a67f6 tardigrade: update docs to explain differences between s3 and this backend
Co-authored-by: Caleb Case <calebcase@gmail.com>
2022-02-08 11:40:29 +00:00
Márton Elek
25ea04f1db s3: add specific provider for Storj Shared gateways
- unsupported features (Copy) are turned off for Storj
- enable urlEncodedListing for Storj provider
- set chunksize to 64Mb
2022-02-08 11:40:29 +00:00
Nick Craig-Wood
06ffd4882d onedrive: add --onedrive-root-folder-id flag #5948
This allows navigating to difficult-to-find folders in onedrive.
2022-02-07 12:29:36 +00:00
Nick Craig-Wood
19a5e1d63b docs: document --disable-http2 #5253 2022-02-07 12:29:36 +00:00
Nick Craig-Wood
ec88b66dad Add Abhiraj to contributors 2022-02-07 12:29:36 +00:00
Abhiraj
aa2d7f00c2 drive: added --drive-copy-shortcut-content - fixes #4604 2022-02-04 11:37:58 +00:00
Nick Craig-Wood
3e125443aa build: fix ARM architecture version in .deb packages after nfpm change
Fixes #5973
2022-02-03 11:24:06 +00:00
Nick Craig-Wood
3c271b8b1e Add Eng Zer Jun to contributors 2022-02-03 11:24:06 +00:00
Nick Craig-Wood
6d92ba2c6c Add viveknathani to contributors 2022-02-03 11:24:06 +00:00
albertony
c26dc69e1b docs/jottacloud: add note that mime types are not available with --fast-list 2022-02-02 13:12:50 +01:00
albertony
b0de0b4609 docs: include all commands in online help top menu drop-down 2022-02-01 20:40:50 +01:00
albertony
f54641511a librclone: add support for mount commands
Fixes #5661
2022-02-01 19:29:36 +01:00
Eng Zer Jun
8cf76f5e11 test: use T.TempDir to create temporary test directory
The directory created by `T.TempDir` is automatically removed when the
test and all its subtests complete.

Reference: https://pkg.go.dev/testing#T.TempDir
Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2022-02-01 11:47:04 +00:00
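A minimal example of the pattern this migrates to; the manual os.MkdirTemp plus defer os.RemoveAll boilerplate disappears:

```go
package example

import (
	"os"
	"path/filepath"
	"testing"
)

func TestWriteFile(t *testing.T) {
	// t.TempDir creates a unique directory and registers its removal
	// via t.Cleanup, so no defer os.RemoveAll is needed.
	dir := t.TempDir()
	name := filepath.Join(dir, "file.txt")
	if err := os.WriteFile(name, []byte("hello"), 0o600); err != nil {
		t.Fatal(err)
	}
}
```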
viveknathani
18c24014da docs/content: describe mandatory fields for drive
Making a client-id for Google Drive requires you to add two more fields
besides the already documented "Application name" field. This commit
documents what should be written for those two fields.

Fixes #5967
2022-02-01 11:42:12 +00:00
Nick Craig-Wood
0ae39bda8d docs: fix and reword --update docs
After discussion on the forum with @bandwidth, this rewords the
--update docs to be correct and easier to understand.

See: https://forum.rclone.org/t/help-understanding-update/28937
2022-02-01 11:07:51 +00:00
Nick Craig-Wood
051685baa1 s3: fix multipart upload with --no-head flag - Fixes #5956
Before this change a multipart upload with the --no-head flag returned
the MD5SUM as a base64 string rather than a hex string as the rest of
rclone was expecting.
2022-01-29 12:48:51 +00:00
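The conversion involved is just a re-encoding of the same 16 MD5 bytes; for example (the value shown is the MD5 of "hello world"):

```go
package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	// S3 reports the sum base64-encoded; rclone expects lowercase hex.
	raw, err := base64.StdEncoding.DecodeString("XrY7u+Ae7tCTyyK7j1rNww==")
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(raw)) // 5eb63bbbe01eeed093cb22bb8f5acdc3
}
```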
albertony
07f53aebdc touch: fix issue where directory is created instead of file
Detected on ftp, sftp and Dropbox backends.

Fixes #5952
2022-01-28 20:29:12 +01:00
albertony
bd6d36b3f6 docs: improve standard list of properties for options 2022-01-28 19:43:51 +01:00
Nick Craig-Wood
b168479429 gcs: add missing regions - fixes #5955 2022-01-28 12:34:13 +00:00
Nick Craig-Wood
b447b0cd78 build: upgrade actions runner macos-11 to fix macOS build problems #5951 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
4bd2386632 build: don't specify macos SDK any more as default is good enough #5951
This fixes the build, in particular the error:

    Failed to run ["xcrun" "--sdk" "macosx11.1" "--show-sdk-path"]: exit status 1
2022-01-27 17:33:04 +00:00
Nick Craig-Wood
83b6b62c1b build: disable cmount tests under macOS and the CI since they are locking up
This fixes #5951 and allows the macOS builds to run again

See #5960 for more info.
2022-01-27 17:33:04 +00:00
Nick Craig-Wood
5826cc9d9e Add Paulo Martins to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
252432ae54 Add Gourav T to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
8821629333 Add Isaac Levy to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
a2092a8faf Add Vanessasaurus to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
2b6f4241b4 Add Alain Nussbaumer to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
e3dd16d490 Add Charlie Jiang to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
9e1fd923f6 Add Yunhai Luo to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
3684789858 Add Koopa to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
1ac1dd428a Add Niels van de Weem to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
65dbd29c22 Add Kim to contributors 2022-01-27 17:33:04 +00:00
albertony
164774d7e1 Add Shmz Ozggrn to contributors 2022-01-27 09:43:42 +01:00
Shmz Ozggrn
507020f408 docs: Use Adaptive Logo in README 2022-01-27 09:35:36 +01:00
albertony
a667e03fc9 http: improved recognition of url pointing to a single file - fixes #5929 2022-01-26 11:41:01 +01:00
albertony
1045344943 http: status string already includes the status code 2022-01-26 11:41:01 +01:00
albertony
5e469db420 docs/http: fix list layout in --http-no-head help
Existing help text ended with a list, but then auto-generated list items
Config, Env Var, Type and Default would be included in the same list.
2022-01-26 11:41:01 +01:00
albertony
946e84d194 http: use string contains instead of index 2022-01-26 11:41:01 +01:00
albertony
162aba60eb http: error strings should not be capitalized 2022-01-26 11:41:01 +01:00
albertony
d8a874c32b Make http tests line ending agnostic 2022-01-26 11:41:01 +01:00
albertony
9c451d9ac6 Fix linting errors 2022-01-26 00:02:17 +01:00
albertony
8f3f24672c docs/serve: move help for template option into separate section 2022-01-25 18:19:21 +01:00
Paulo Martins
0eb7b716d9 s3: document Content-MD5 workaround for object-lock enabled buckets - Fixes #5765 2022-01-25 16:10:57 +00:00
Gourav T
ee9684e60f fichier: implemented about functionality 2022-01-25 15:53:58 +00:00
negative0
e0cbe413e1 rc: Allow user to disable authentication for web gui 2022-01-25 15:52:30 +00:00
albertony
2523dd6220 version: report correct friendly-name for windows 10/11 versions after 2004
Until Windows 10 version 2004 (May 2020) this could be found in the registry entry
ReleaseId; after that we must use the entry DisplayVersion (ReleaseId is stuck at 2009).
Source: https://ss64.com/nt/ver.html
2022-01-24 21:27:42 +01:00
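A sketch of the registry lookup described, using golang.org/x/sys/windows/registry (Windows only; the fallback order is the assumption here):

```go
//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows/registry"
)

func main() {
	k, err := registry.OpenKey(registry.LOCAL_MACHINE,
		`SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
	if err != nil {
		panic(err)
	}
	defer k.Close()
	// DisplayVersion (e.g. "21H2") supersedes ReleaseId, which is stuck
	// at "2009" from Windows 10 version 2004 onwards.
	if v, _, err := k.GetStringValue("DisplayVersion"); err == nil {
		fmt.Println(v)
		return
	}
	v, _, _ := k.GetStringValue("ReleaseId")
	fmt.Println(v)
}
```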
albertony
c504d97017 config: fix display of config choices with empty help text 2022-01-18 20:17:57 +01:00
albertony
b783f09fc6 config: show default and example values in correct input syntax instead of quoted and escaped golang string syntax
See #5551
2022-01-16 14:57:38 +01:00
albertony
a301478a13 config: improved punctuation in initial config prompt 2022-01-16 14:57:38 +01:00
albertony
63b450a2a5 config: minor improvement of help text for encoding option
See #5551
2022-01-16 14:57:38 +01:00
albertony
843b77aaaa docs/ftp: improved default value description of port and username options
See #5551
2022-01-16 14:57:38 +01:00
albertony
3641727edb config: fix issue where required password options had to be re-entered when editing existing remote
See #5551
2022-01-16 14:57:38 +01:00
albertony
38e2f835ed config: fix handling of default, exclusive and required properties of multiple-choice options
Previously an empty input (just pressing enter) was only allowed for multiple-choice
options that did not have the Exclusive property set. With this change the existing
Required property is introduced into the multiple choice handling, so that one can have
Exclusive and Required options where only a value from the list is allowed, and one can
have Exclusive but not Required options where an empty value is accepted but any
non-empty value must still match an item from the list.

Fixes #5549

See #5551
2022-01-16 14:57:38 +01:00
albertony
bd4bbed592 config: remove explicit setting of required property to true for options with a default value
See #5551
2022-01-16 14:57:38 +01:00
albertony
994b501188 config: remove explicit setting of required property to its default value false
See #5551
2022-01-16 14:57:38 +01:00
albertony
dfa9381814 docs/jottacloud: correct reference to temp-dir 2022-01-16 14:34:15 +01:00
albertony
2a85feda4b docs/jottacloud: add note about upload only being supported on jotta device 2022-01-16 14:34:15 +01:00
albertony
ad46af9168 docs/librclone: note that adding -ldflags -s to the build command will reduce size of library file 2022-01-16 14:32:01 +01:00
albertony
2fed02211c docs/librclone: document use from C/C++ on Windows 2022-01-16 14:11:56 +01:00
albertony
237daa8aaf dedupe: add quit as a choice in interactive mode
Fixes #5881
2022-01-14 19:57:48 +01:00
albertony
8aeca6c033 docs: align menu items when icons have different sizes 2022-01-14 17:39:27 +00:00
albertony
fd82876086 librclone: allow empty string or null input instead of empty json object 2022-01-14 17:37:13 +00:00
Isaac Levy
be1a668e95 onedrive: minor optimization of quickxorhash
This patch avoids creating a new slice header in favour of a for loop.

This saves a few instructions!
2022-01-14 17:30:56 +00:00
Vanessasaurus
9d4eab32d8 cmd: fix broken example link in help.go
This link appears to be broken, so here is another reference to (I think) the same file that provides a good example of cobra. We could also link to the current commit 8312004f41/cli/cobra.go although it might be better to maintain an up-to-date example.
2022-01-13 16:26:19 +00:00
Alain Nussbaumer
b4ba7b69b8 dlna: change icons to the newest ones. 2022-01-13 16:23:24 +00:00
albertony
deef659aef Add Bumsu Hyeon to contributors 2022-01-13 13:25:20 +01:00
Bumsu Hyeon
4b99e84242 vfs/cache: fix handling of special characters in file names (#5875) 2022-01-13 13:23:25 +01:00
albertony
06bdf7c64c Add Lu Wang to contributors 2022-01-12 21:33:35 +01:00
Lu Wang
e1225b5729 docs/s3: fixed max-age example 2022-01-12 21:31:54 +01:00
albertony
871cc2f62d docs: fix links to rc sections 2022-01-12 19:51:26 +01:00
Charlie Jiang
bc23bf11db onedrive: add config option for oauth scope Sites.Read.All (#5883) 2022-01-10 21:28:19 +08:00
albertony
b55575e622 docs: fix typo 2022-01-03 18:46:40 +01:00
albertony
328f0e7135 docs: fix links to rc debug commands 2021-12-30 21:52:34 +01:00
albertony
a52814eed9 docs: fix links to rc data types section 2021-12-30 20:46:39 +01:00
albertony
071a9e882d docs: capitalization of flag usage strings 2021-12-30 14:07:24 +01:00
albertony
4e2ca3330c tree: remove obsolete --human replaced by global --human-readable - fixes #5868 2021-12-21 20:17:00 +01:00
Yunhai Luo
408d9f3e7a s3: Add GLACIER_IR storage class 2021-12-03 14:46:45 +00:00
Koopa
0681a5c86a lib/rest: process HTML entities within XML
MEGAcmd currently includes escaped HTML4 entities in its XML messages.
This behavior deviates from the XML standard, but currently it prevents
rclone from being able to use the remote.
2021-12-01 16:31:43 +00:00
Niels van de Weem
df09c3f555 pcloud: add support for recursive list 2021-12-01 15:58:44 +00:00
Kim
c41814fd2d jottacloud: change API used by ListR (--fast-list) 2021-12-01 14:21:37 +01:00
Nick Craig-Wood
c2557cc432 azureblob: fix crash with SAS URL and no container - fixes #5820
Before this change attempting NewObject on a SAS URL's root would
crash the Azure SDK.

This change detects that using the code from this previous fix

f7404f52e7 azureblob: fix crash when listing outside a SAS URL's root - fixes #4851

and returns object not found instead.

It also prevents things being uploaded to the root of the SAS URL
which also crashes the Azure SDK.
2021-11-27 16:18:18 +00:00
Nick Craig-Wood
3425726c50 oauthutil: fix crash when web browser requests /robots.txt - fixes #5836
Before this change the oauth webserver would crash if it received a
request to /robots.txt.

This patch makes it ignore (with 404 error) any paths it isn't
expecting.
2021-11-25 12:12:14 +00:00
Nick Craig-Wood
46175a22d8 Add Logeshwaran Murugesan to contributors 2021-11-25 12:11:47 +00:00
Logeshwaran Murugesan
bcf0e15ad7 Simplify content length processing in s3 with download url 2021-11-25 12:03:14 +00:00
Nick Craig-Wood
b91c349cd5 local: fix hash invalidation which caused errors with local crypt mount
Before this fix if a file was updated, but to the same length and
timestamp then the local backend would return the wrong (cached)
hashes for the object.

This happens regularly on a crypted local disk mount when the VFS
thinks files have been changed but actually their contents are
identical to that written previously. This is because when files are
uploaded their nonce changes so the contents of the file changes but
the timestamp and size remain the same because the file didn't
actually change.

This causes errors like this:

    ERROR: file: Failed to copy: corrupted on transfer: md5 crypted
    hash differ "X" vs "Y"

This turned out to be because the local backend wasn't clearing its
cache of hashes when the file was updated.

This fix clears the hash cache for Update and Remove.

It also puts a src and destination in the crypt message to make future
debugging easier.

Fixes #4031
2021-11-24 12:09:34 +00:00
Nick Craig-Wood
d252816706 vfs: add vfs/stats remote control to show statistics - fixes #5816 2021-11-23 18:00:21 +00:00
Nick Craig-Wood
729117af68 Add GGG KILLER to contributors 2021-11-23 18:00:21 +00:00
GGG KILLER
cd4d8d55ec docs: add a note about the B2 download_url format
Currently the B2 docs don't specify which format the download_url
setting should have, and if you input it wrong, there is nothing
in the verbose logs or anywhere else that can let you know that.
2021-11-23 17:57:34 +00:00
Nick Craig-Wood
f26abc89a6 union: fix treatment of remotes with // in
See: https://forum.rclone.org/t/connection-string-with-union-backend-and-a-lot-of-quotes/27577
2021-11-23 17:41:12 +00:00
lindwurm
b5abbe819f s3: Add Wasabi AP Northeast 2 endpoint info
* Wasabi has started to provide an AP Northeast 2 (Osaka) endpoint, so add it to the list
* Rename ap-northeast-1 from "AP Northeast" to "AP Northeast 1 (Tokyo)"

Signed-off-by: lindwurm <lindwurm.q@gmail.com>
2021-11-22 18:02:57 +00:00
Nick Craig-Wood
a351484997 sftp: fix timeout on hashing large files by sending keepalives
Before this fix the SFTP sessions could time out when doing hashes if
they took longer than the --timeout parameter.

This patch sends keepalive packets every minute while a shell command
is running to keep the connection open.

See: https://forum.rclone.org/t/rclone-check-over-sftp-failure-to-calculate-md5-hash-for-large-files/27487
2021-11-22 15:26:29 +00:00
Nick Craig-Wood
099eff8891 sftp: refactor so we only have one way of running remote commands
This also returns errors from running ssh Hash commands which we
didn't do before.
2021-11-22 15:26:29 +00:00
albertony
c4cb167d4a Add rsapkf and Will Holtz to contributors 2021-11-21 19:26:05 +01:00
Will Holtz
38e100ab19 docs/config: more explicit doc for config create --all with params 2021-11-21 19:22:19 +01:00
rsapkf
db95a0d6c3 docs/pcloud: fix typo 2021-11-21 19:16:19 +01:00
Nick Craig-Wood
df07964db3 azureblob: raise --azureblob-upload-concurrency to 16 by default
After speed testing it was discovered that upload speed goes up pretty
much linearly with upload concurrency. This patch changes the default
from 4 to 16 which means that rclone will use 16 * 4M = 64M per
transfer which is OK even for low memory devices.

This adds a note that performance may be increased by increasing
upload concurrency.

See: https://forum.rclone.org/t/performance-of-rclone-vs-azcopy/27437/9
2021-11-18 16:09:02 +00:00
Nick Craig-Wood
fbc4c4ad9a azureblob: remove 100MB upper limit on chunk_size as it is no longer needed 2021-11-18 16:09:02 +00:00
Nick Craig-Wood
4454b3e1ae azureblob: implement --azureblob-upload-concurrency parameter to speed uploads
See: https://forum.rclone.org/t/performance-of-rclone-vs-azcopy/27437
2021-11-18 16:08:57 +00:00
Nick Craig-Wood
f9321fccbb Add deinferno to contributors 2021-11-18 15:51:45 +00:00
Ole Frost
3c2252b7c0 fs/operations: add server-side moves to stats
Fixes #5430
2021-11-18 12:20:56 +00:00
Cnly
51c952654c fstests: treat accountUpgradeRequired as success for OneDrive PublicLink 2021-11-17 17:35:17 +00:00
deinferno
80e47be65f yandex: add permanent deletion support 2021-11-17 16:57:41 +00:00
Michał Matczuk
38dc3e93ee fshttp: add prometheus metrics for HTTP status code
This patch adds rclone_http_status_code counter vector labeled by

* host,
* method,
* code.

It allows one to see HTTP errors, backoffs etc.

The Metrics struct is designed for extensibility.
Adding new metrics is a matter of adding them to the Metrics struct and including them in the response handling.

This feature has been discussed in the forum [1].

[1] https://forum.rclone.org/t/prometheus-metrics/14484
2021-11-17 18:38:12 +03:00
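A sketch of such a counter vector with github.com/prometheus/client_golang; the metric name and labels come from the commit message, while the registration and increment site are illustrative:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var httpStatusCodes = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "rclone_http_status_code",
		Help: "HTTP responses by host, method and status code.",
	},
	[]string{"host", "method", "code"},
)

func main() {
	prometheus.MustRegister(httpStatusCodes)
	// Incremented from the transport after each response, e.g. a backoff:
	httpStatusCodes.WithLabelValues("example.com", "GET", "429").Inc()
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe("localhost:9090", nil)
}
```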
Nick Craig-Wood
ba6730720d Fix repeated error messages after pkg/errors removal 2021-11-15 17:58:40 +00:00
Nick Craig-Wood
7735b5c694 Add Sinan Tan to contributors 2021-11-15 17:58:40 +00:00
Nick Craig-Wood
d45b3479ee Add Andy Jackson to contributors 2021-11-15 17:58:40 +00:00
Nick Craig-Wood
4c5df0a765 Add Fredric Arklid to contributors 2021-11-15 17:58:40 +00:00
Sinan Tan
8c61a09be2 crypt: add test cases and documentation for base64 and base32768 filename encoding #5801 2021-11-15 17:57:02 +00:00
Max Sum
c217145cae crypt: add base64 and base32768 filename encoding options #5801 2021-11-15 17:57:02 +00:00
thomae
4c93378f0e serve sftp: update docs on --stdio 2021-11-12 10:49:35 +00:00
thomae
f9e54f96c3 docs/sftp: fix typo 2021-11-11 19:20:15 +01:00
Andy Jackson
af0fcd03cb hdfs: add file and directory move/rename support 2021-11-11 16:41:43 +00:00
albertony
00aafc957e sftp: add rclone to list of supported md5sum/sha1sum commands to look for
See #5781
2021-11-11 15:16:45 +01:00
albertony
29abbd2032 hashsum: support creating hash from data received on stdin
See #5781
2021-11-11 15:16:45 +01:00
Fredric Arklid
663b2d9c46 jottacloud: Add support for Tele2 Cloud 2021-11-11 12:32:23 +00:00
Nick Craig-Wood
f36d6d01b5 rc: fix operations/publiclink default for expires parameter
Before this change the expires parameter was defaulting to 0 if not
provided.

This change makes it default to fs.DurationOff which is the same as
the `rclone link` command.

See: https://forum.rclone.org/t/operations-publiclink-from-dropbox-error-not-autorized/27374
2021-11-11 11:14:22 +00:00
Nick Craig-Wood
0c03aa3a8b dropbox: speed up directory listings by specifying 1000 items in a chunk 2021-11-11 11:14:22 +00:00
Nick Craig-Wood
caa2b8bf40 dropbox: save an API request when at the root
Before this change, rclone always emitted an API request to discover
what type of thing the root is.

This is unnecessary as it is always a directory.
2021-11-11 11:14:22 +00:00
Nick Craig-Wood
421e840e37 Add Borna Butkovic to contributors 2021-11-11 11:14:22 +00:00
Nick Craig-Wood
9b57d27be4 Add David to contributors 2021-11-11 11:14:22 +00:00
Borna Butkovic
627ac1b2d9 ftp: add --ftp-ask-password to prompt for password when needed 2021-11-10 17:34:14 +00:00
David
ae395d8cf0 rc: error on web GUI update won't be fatal - fixes #5385 2021-11-10 17:05:13 +00:00
Ankur Gupta
f04520a6e3 operations: fix goroutine leak in case of copy retry
Whenever transfer.Account() is called, a new goroutine acc.averageLoop()
is started. This goroutine exits only when the channel acc.exit is closed.
acc.exit is closed when acc.Done() is called, which happens during tr.Done().

However, if tr.Reset is called during a copy low level retry, it replaces
the tr.acc, without calling acc.Done(), which results in the goroutine
mentioned above never exiting.

This commit calls acc.Done() during a tr.Reset()
2021-11-10 16:44:29 +00:00
Nick Craig-Wood
c968c3e41c build: raise minimum go version to go1.15
This was necessary because go1.14 seems to have a modules related bug
which means it tries to build modules even though the uses of them are
all disabled with build constraints. This seems to be fixed in go1.15.
2021-11-10 16:11:12 +00:00
Nick Craig-Wood
3661791e82 serve restic: disable for go1.16 and earlier after update 2021-11-10 15:42:50 +00:00
Nick Craig-Wood
4198763c35 build: update all dependencies 2021-11-10 10:34:56 +00:00
Nick Craig-Wood
3de47b8ed4 build: upgrade go.mod file to go1.17 2021-11-10 10:34:56 +00:00
Nick Craig-Wood
71b8e1e80b build: more docs on upgrading dependencies 2021-11-10 10:34:56 +00:00
Nick Craig-Wood
7366e97dfc mega: fix error handling broken by removal of github.com/pkg/errors
There were instances of errors.Wrap being called with a nil error
which the conversion didn't deal with correctly.
2021-11-09 13:43:45 +00:00
Nick Craig-Wood
21ba4d9a18 onedrive: fix error handling broken by removal of github.com/pkg/errors
There were instances of errors.Wrap being called with a nil error
which the conversion didn't deal with correctly.
2021-11-09 13:43:45 +00:00
Nick Craig-Wood
96e099d8e7 union: fix error handling broken by removal of github.com/pkg/errors
There were instances of errors.Wrap being called with a nil error
which the conversion didn't deal with correctly.
2021-11-09 13:43:45 +00:00
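The behavioural difference behind all three of these fixes, side by side (pkgerrors is an import alias for the old library):

```go
package main

import (
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

func main() {
	// pkg/errors: wrapping nil yields nil, so code that wrapped
	// unconditionally still returned "no error".
	fmt.Println(pkgerrors.Wrap(nil, "context") == nil) // true

	// stdlib: fmt.Errorf always returns a non-nil error, even when the
	// %w operand is nil - the case the conversion missed.
	fmt.Println(fmt.Errorf("context: %w", error(nil)) == nil) // false
}
```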
Nick Craig-Wood
2a31b5bdd6 Add bbabich to contributors 2021-11-09 13:43:45 +00:00
Nick Craig-Wood
9bdfe4c36f Add Vitor Arruda to contributors 2021-11-09 13:43:45 +00:00
Nick Craig-Wood
e3a2f539fe Add Chris Lu to contributors 2021-11-09 13:43:45 +00:00
Nick Craig-Wood
ffa943e31f Add Carlo Mion to contributors 2021-11-09 13:43:45 +00:00
bbabich
b16f603c51 s3: Add RackCorp object storage to providers 2021-11-09 11:46:58 +00:00
database64128
a7a8372976 🧪 fstest: fix time tests on Windows and add convenience methods to check local and remote fs with precision
Previously only the fs being checked on gets passed to
GetModifyWindow(). However, in most tests, the test files are
generated in the local fs and transferred to the remote fs. So the
local fs time precision has to be taken into account.

This meant that on Windows the time tests failed because the
local fs has a time precision of 100ns. Checking remote items uploaded
from local fs on Windows also requires a modify window of 100ns.
2021-11-09 11:43:36 +00:00
Vitor Arruda
9beb0677e4 backend: Fix union eplus policy returning nil 2021-11-08 11:55:27 +00:00
Nick Craig-Wood
e43b5ce5e5 Remove github.com/pkg/errors and replace with std library version
This is possible now that we no longer support go1.12 and brings
rclone into line with standard practices in the Go world.

This also removes errors.New and errors.Errorf from lib/errors and
prefers the stdlib errors package over lib/errors.
2021-11-07 11:53:30 +00:00
Chris Lu
97328e5755 Improve description for SeaweedFS 2021-11-06 21:01:50 +03:00
Carlo Mion
7b7d780fff stats: fix missing StatsInfo fields in the computation of the group sum 2021-11-05 15:33:00 +00:00
Carlo Mion
c2600f9e4d stats: fix missing computation of transferQueueSize when summing up statistics group - fixes #5749 2021-11-05 15:33:00 +00:00
Ivan Andreev
7bd853ce35 Add Roberto Ricci to contributors 2021-11-05 18:29:47 +03:00
Roberto Ricci
05150cfb1d backend/ftp: increase testUploadTimeout.maxTime to 10 seconds
On slow machines (e.g. Github CI), especially if GOARCH=386,
the test for cmd/serve/ftp could fail if this value is too small.

Fixes #5783
2021-11-05 18:27:44 +03:00
albertony
25366268fe Add Atílio Antônio to contributors 2021-11-04 12:55:49 +01:00
Atílio Antônio
c08d48a50d docs: improve grammar and fix typos (#5361)
This alters some comments in source files, but is mainly concerned with documentation files and help messages.
2021-11-04 12:50:43 +01:00
Nick Craig-Wood
454574e2cc s3: collect the provider quirks into a single function and update
This removes the checks against the provider throughout the code and
puts them into a single setQuirks function for easy maintenance when
adding a new provider.

It also updates the quirks with the results of testing against
backends we have access to.

This also adds a list_url_encode parameter so that the quirk can be
manually set.
2021-11-03 21:44:09 +00:00
Nick Craig-Wood
9218a3eb00 fs: add a tristate true/false/unset configuration value 2021-11-03 21:44:09 +00:00
Nick Craig-Wood
1e4ef4b4d5 Add Felix Bünemann to contributors 2021-11-03 21:44:09 +00:00
Nick Craig-Wood
8d92f7d697 s3: fallback to ListObject v1 on unsupported providers
This implements a quirks system for providers and notes which
providers we have tested to support ListObjectsV2.

For those providers which don't support ListObjectsV2 we use the
original ListObjects call.
2021-11-03 19:13:50 +00:00
Felix Bünemann
fd56abc5f2 s3: Use ListObjectsV2 for faster listings
Using ListObjectsV2 with a continuation token is about 5-6x faster than
ListObjects with a marker.
2021-11-03 19:13:50 +00:00
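A sketch of V2 paging with aws-sdk-go; the bucket name and session setup are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	input := &s3.ListObjectsV2Input{Bucket: aws.String("my-bucket")} // placeholder bucket
	for {
		page, err := svc.ListObjectsV2(input)
		if err != nil {
			panic(err)
		}
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key))
		}
		if !aws.BoolValue(page.IsTruncated) {
			break
		}
		// The opaque continuation token lets the server resume the listing,
		// which is what makes V2 faster than V1's marker-based paging.
		input.ContinuationToken = page.NextContinuationToken
	}
}
```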
Ivan Andreev
b323bf34e2 sync/test: skip test ConcurrentTruncate on uptobox (take 2)
The test is not applicable to uptobox which can't upload empty files.
The test was not skipped as intended because the direct error was compared.
This fix will compare error Cause because Sync wraps the error.
2021-11-02 19:24:23 +03:00
Ivan Andreev
e78e73eae7 lib/encoder: fix benchmarks
At some point in the past the Slash encode option was added to the Onedrive
encoder, so it began to encode slashes in file names rather than treat
them as path separators.
This patch adapts benchmark test cases accordingly.

Fixes #5659
2021-11-02 19:23:16 +03:00
Nick Craig-Wood
f51a5eca2e fstests: add encoding test for URL encoded path name #5768
Add an encoding test to make sure backends can deal with a URL encoded
path name. This is a fairly common failing in backends and has been an
intermittent problem with onedrive itself.
2021-11-02 15:59:36 +00:00
albertony
39e2af7974 config: allow dot in remote names (#5606) 2021-11-01 20:50:06 +01:00
Ivan Andreev
b3217adf08 Add Chris Nelson to contributors 2021-11-01 21:24:06 +03:00
Ivan Andreev
074234119a bisync: documentation #5164
Co-authored-by: Chris Nelson <stuff@cjnaz.com>
2021-11-01 21:00:27 +03:00
Ivan Andreev
6210e22ab5 bisync: implementation #5164
Fixes #118

Co-authored-by: Chris Nelson <stuff@cjnaz.com>
2021-11-01 21:00:27 +03:00
Ivan Andreev
940e99a929 bisync: test scenarios #5164
Co-authored-by: Chris Nelson <stuff@cjnaz.com>
2021-11-01 21:00:27 +03:00
Ivan Andreev
79b6866b57 rc: export NewErrParamInvalid #5164 2021-11-01 21:00:27 +03:00
Ivan Andreev
c142e3edcc filter: export GlobToRegexp #5164 2021-11-01 21:00:27 +03:00
Nick Craig-Wood
5c646dff9a Start v1.58.0-DEV development 2021-11-01 16:54:17 +00:00
Nick Craig-Wood
19dfaf7440 docs: fix shortcode rendering on download page 2021-11-01 16:50:52 +00:00
898 changed files with 49595 additions and 17698 deletions


@@ -9,7 +9,7 @@ We understand you are having a problem with rclone; we want to help you with tha
 **STOP and READ**
 **YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
-Please show the effort you've put in to solving the problem and please be specific.
+Please show the effort you've put into solving the problem and please be specific.
 People are volunteering their time to help! Low effort posts are not likely to get good answers!

 If you think you might have found a bug, try to replicate it with the latest beta (or stable).


@@ -25,12 +25,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.14', 'go1.15', 'go1.16']
+        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.16', 'go1.17']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.17.x'
+            go: '1.18.x'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -40,8 +40,8 @@ jobs:
             deploy: true

           - job_name: mac_amd64
-            os: macOS-latest
-            go: '1.17.x'
+            os: macos-11
+            go: '1.18.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -49,15 +49,15 @@ jobs:
             deploy: true

           - job_name: mac_arm64
-            os: macOS-latest
-            go: '1.17.x'
+            os: macos-11
+            go: '1.18.x'
             gotags: 'cmount'
-            build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
+            build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows_amd64
             os: windows-latest
-            go: '1.17.x'
+            go: '1.18.x'
             gotags: cmount
             build_flags: '-include "^windows/amd64" -cgo'
             build_args: '-buildmode exe'
@@ -67,7 +67,7 @@ jobs:
           - job_name: windows_386
             os: windows-latest
-            go: '1.17.x'
+            go: '1.18.x'
             gotags: cmount
             goarch: '386'
             cgo: '1'
@@ -78,29 +78,23 @@ jobs:
           - job_name: other_os
             os: ubuntu-latest
-            go: '1.17.x'
-            build_flags: '-exclude "^(windows/|darwin/|linux/)"'
+            go: '1.18.x'
+            build_flags: '-exclude "^(windows/(386|amd64)|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.14
-            os: ubuntu-latest
-            go: '1.14.x'
-            quicktest: true
-            racequicktest: true
-
-          - job_name: go1.15
-            os: ubuntu-latest
-            go: '1.15.x'
-            quicktest: true
-            racequicktest: true
-
           - job_name: go1.16
             os: ubuntu-latest
             go: '1.16.x'
             quicktest: true
             racequicktest: true

+          - job_name: go1.17
+            os: ubuntu-latest
+            go: '1.17.x'
+            quicktest: true
+            racequicktest: true
+
     name: ${{ matrix.job_name }}

     runs-on: ${{ matrix.os }}
@@ -116,6 +110,7 @@ jobs:
       with:
         stable: 'false'
         go-version: ${{ matrix.go }}
+        check-latest: true

     - name: Set environment variables
       shell: bash
@@ -140,7 +135,7 @@ jobs:
       run: |
         brew update
         brew install --cask macfuse
-      if: matrix.os == 'macOS-latest'
+      if: matrix.os == 'macos-11'

     - name: Install Libraries on Windows
       shell: powershell
@@ -251,14 +246,14 @@ jobs:
         fetch-depth: 0
     # Upgrade together with NDK version
-    - name: Set up Go 1.16
+    - name: Set up Go
       uses: actions/setup-go@v1
       with:
-        go-version: 1.16
+        go-version: 1.18.x
     # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
     - name: Force NDK version
-      run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
+      run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true
     - name: Go module cache
       uses: actions/cache@v2
@@ -279,8 +274,8 @@ jobs:
     - name: install gomobile
       run: |
-        go get golang.org/x/mobile/cmd/gobind
-        go get golang.org/x/mobile/cmd/gomobile
+        go install golang.org/x/mobile/cmd/gobind@latest
+        go install golang.org/x/mobile/cmd/gomobile@latest
         env PATH=$PATH:~/go/bin gomobile init
     - name: arm-v7a gomobile build
@@ -289,7 +284,7 @@ jobs:
     - name: arm-v7a Set environment variables
       shell: bash
       run: |
-        echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
+        echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
         echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
         echo 'GOOS=android' >> $GITHUB_ENV
         echo 'GOARCH=arm' >> $GITHUB_ENV
@@ -302,7 +297,7 @@ jobs:
     - name: arm64-v8a Set environment variables
       shell: bash
       run: |
-        echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
+        echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
         echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
         echo 'GOOS=android' >> $GITHUB_ENV
         echo 'GOARCH=arm64' >> $GITHUB_ENV
@@ -315,7 +310,7 @@ jobs:
     - name: x86 Set environment variables
       shell: bash
       run: |
-        echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
+        echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
         echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
         echo 'GOOS=android' >> $GITHUB_ENV
         echo 'GOARCH=386' >> $GITHUB_ENV
@@ -328,7 +323,7 @@ jobs:
     - name: x64 Set environment variables
       shell: bash
       run: |
-        echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
+        echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
         echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
         echo 'GOOS=android' >> $GITHUB_ENV
         echo 'GOARCH=amd64' >> $GITHUB_ENV


@@ -223,7 +223,7 @@ find the results at https://pub.rclone.org/integration-tests/
 Rclone code is organised into a small number of top level directories
 with modules beneath.

-  * backend - the rclone backends for interfacing to cloud providers -
+  * backend - the rclone backends for interfacing to cloud providers -
     * all - import this to load all the cloud providers
     * ...providers
   * bin - scripts for use while building or maintaining rclone
@@ -233,7 +233,7 @@ with modules beneath.
   * cmdtest - end-to-end tests of commands, flags, environment variables,...
   * docs - the documentation and website
     * content - adjust these docs only - everything else is autogenerated
-    * command - these are auto generated - edit the corresponding .go file
+    * command - these are auto-generated - edit the corresponding .go file
   * fs - main rclone definitions - minimal amount of code
     * accounting - bandwidth limiting and statistics
     * asyncreader - an io.Reader which reads ahead
@@ -299,7 +299,7 @@ the source file in the `Help:` field.
 countries, it looks better without an ending period/full stop character.

 The only documentation you need to edit are the `docs/content/*.md`
-files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
+files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
 from those during the release process. See the `make doc` and `make
 website` targets in the Makefile if you are interested in how. You
 don't need to run these when adding a feature.
@@ -350,7 +350,7 @@ And here is an example of a longer one:
 ```
 mount: fix hang on errored upload

-In certain circumstances if an upload failed then the mount could hang
+In certain circumstances, if an upload failed then the mount could hang
 indefinitely. This was fixed by closing the read pipe after the Put
 completed. This will cause the write side to return a pipe closed
 error fixing the hang.
@@ -382,7 +382,7 @@ and `go.sum` in the same commit as your other changes.
 If you need to update a dependency then run

-    GO111MODULE=on go get -u github.com/pkg/errors
+    GO111MODULE=on go get -u golang.org/x/crypto

 Check in a single commit as above.
@@ -425,8 +425,8 @@ Research
 Getting going

   * Create `backend/remote/remote.go` (copy this from a similar remote)
-    * box is a good one to start from if you have a directory based remote
-    * b2 is a good one to start from if you have a bucket based remote
+    * box is a good one to start from if you have a directory-based remote
+    * b2 is a good one to start from if you have a bucket-based remote
   * Add your remote to the imports in `backend/all/all.go`
   * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
   * Try to implement as many optional methods as possible as it makes the remote more usable.


@@ -15,11 +15,11 @@ Current active maintainers of rclone are:
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | tardigrade backend |
| Caleb Case | @calebcase | storj backend |
**This is a work in progress Draft**
This is a guide for how to be an rclone maintainer. This is mostly a writeup of what I (@ncw) attempt to do.
This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.
## Triaging Tickets ##
@@ -27,15 +27,15 @@ When a ticket comes in it should be triaged. This means it should be classified
Rclone uses the labels like this:
* `bug` - a definite verified bug
* `bug` - a definitely verified bug
* `can't reproduce` - a problem which we can't reproduce
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with `rclone mount` command
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
* `help` wanted - mark these if you find a self-contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation, etc.
* `Needs Go 1.XX` - waiting for that version of Go to be released
@@ -51,7 +51,7 @@ The milestones have these meanings:
* v1.XX - stuff we would like to fit into this release
* v1.XX+1 - stuff we are leaving until the next release
* Soon - stuff we think is a good idea - waiting to be scheduled to a release
* Soon - stuff we think is a good idea - waiting to be scheduled for a release
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
@@ -65,7 +65,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po
Try to process pull requests promptly!
-Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
+Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
@@ -81,15 +81,15 @@ Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
High impact regressions should be fixed before the next release.
-Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.
+Near the start of the release cycle, the dependencies should be updated with `make update` to give time for bugs to surface.
Towards the end of the release cycle try not to merge anything too big so let things settle down.
-Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
+Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time-consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
## Mailing list ##
-There is now an invite only mailing list for rclone developers `rclone-dev` on google groups.
+There is now an invite-only mailing list for rclone developers `rclone-dev` on google groups.
## TODO ##

MANUAL.html generated (5267 changed lines - diff too large to show)

MANUAL.md generated (5734 changed lines - diff too large to show)

MANUAL.txt generated (7550 changed lines - diff too large to show)

Makefile

@@ -104,10 +104,14 @@ showupdates:
@echo "*** Direct dependencies that could be updated ***"
@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
+# Update direct dependencies only
+updatedirect:
+GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
+GO111MODULE=on go mod tidy
# Update direct and indirect dependencies and test dependencies
update:
-GO111MODULE=on go get -u -t ./...
-#GO111MODULE=on go get -d $(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
+GO111MODULE=on go get -d -u -t ./...
GO111MODULE=on go mod tidy
# Tidy the module dependencies

README.md

@@ -1,8 +1,9 @@
-[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
+[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
+[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
@@ -10,24 +11,27 @@
[![Build Status](https://github.com/rclone/rclone/workflows/build/badge.svg)](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)
# Rclone
-Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
+Rclone *("rsync for cloud storage")* is a command-line program to sync files and directories to and from different cloud storage providers.
## Storage providers
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
+* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
+* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
@@ -59,20 +63,21 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
+* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
+* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
-* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
## Features

RELEASE.md

@@ -34,13 +34,24 @@ This file describes how to make the various kinds of releases
* make startdev # make startstable for stable branch
* # announce with forum post, twitter post, patreon post
## Update dependencies
Early in the next release cycle update the dependencies
* Review any pinned packages in go.mod and remove if possible
-* make update
-* git status
-* git add new files
+* make updatedirect
+* make
* git commit -a -v
+* make update
+* make
+* roll back any updates which didn't compile
+* git commit -a -v --amend
+Note that `make update` updates all direct and indirect dependencies
+and there can occasionally be forwards compatibility problems with
+doing that so it may be necessary to roll back dependencies to the
+version specified by `make updatedirect` in order to get rclone to
+build.
## Making a point release

VERSION

@@ -1 +1 @@
-v1.57.0
+v1.59.0

backend/all/all.go

@@ -9,6 +9,7 @@ import (
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
+_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
@@ -28,6 +29,7 @@ import (
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/memory"
+_ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/pcloud"
@@ -39,9 +41,9 @@ import (
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
+_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
-_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"

backend/amazonclouddrive/amazonclouddrive.go

@@ -14,6 +14,7 @@ we ignore assets completely!
import (
"context"
"encoding/json"
+"errors"
"fmt"
"io"
"net/http"
@@ -22,7 +23,6 @@ import (
"time"
acd "github.com/ncw/go-acd"
-"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -259,7 +259,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
if err != nil {
-return nil, errors.Wrap(err, "failed to configure Amazon Drive")
+return nil, fmt.Errorf("failed to configure Amazon Drive: %w", err)
}
c := acd.NewClient(oAuthClient)
@@ -292,13 +292,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
-return nil, errors.Wrap(err, "failed to get endpoints")
+return nil, fmt.Errorf("failed to get endpoints: %w", err)
}
// Get rootID
rootInfo, err := f.getRootInfo(ctx)
if err != nil || rootInfo.Id == nil {
-return nil, errors.Wrap(err, "failed to get root")
+return nil, fmt.Errorf("failed to get root: %w", err)
}
f.trueRootID = *rootInfo.Id
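
The mechanical change running through this file and most of the ones below is the replacement of `github.com/pkg/errors` with the standard library: `errors.Wrap(err, "msg")` becomes `fmt.Errorf("msg: %w", err)`. A standalone sketch (the sentinel error here is just an example) showing that `%w` keeps the cause inspectable:

```go
package main

import (
	"errors"
	"fmt"
)

var errAuth = errors.New("auth failed")

func configure() error {
	// was: return errors.Wrap(errAuth, "failed to configure Amazon Drive")
	return fmt.Errorf("failed to configure Amazon Drive: %w", errAuth)
}

func main() {
	err := configure()
	fmt.Println(err)                     // failed to configure Amazon Drive: auth failed
	fmt.Println(errors.Is(err, errAuth)) // true - %w keeps the chain unwrappable
}
```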

backend/azureblob/azureblob.go

@@ -10,6 +10,7 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
+"errors"
"fmt"
"io"
"io/ioutil"
@@ -24,7 +25,6 @@ import (
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/go-autorest/autorest/adal"
-"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -50,8 +50,6 @@ const (
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
storageDefaultBaseURL = "blob.core.windows.net"
defaultChunkSize = 4 * fs.Mebi
-maxChunkSize = 100 * fs.Mebi
-uploadConcurrency = 4
defaultAccessTier = azblob.AccessTierNone
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
// Default storage account, key and blob endpoint for emulator support,
@@ -134,12 +132,33 @@ msi_client_id, or msi_mi_res_id parameters.`,
Advanced: true,
}, {
Name: "chunk_size",
-Help: `Upload chunk size (<= 100 MiB).
+Help: `Upload chunk size.
Note that this is stored in memory and there may be up to
-"--transfers" chunks stored at once in memory.`,
+"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
+in memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
+Name: "upload_concurrency",
+Help: `Concurrency for multipart uploads.
+This is the number of chunks of the same file that are uploaded
+concurrently.
+If you are uploading small numbers of large files over high-speed
+links and these uploads do not fully utilize your bandwidth, then
+increasing this may help to speed up the transfers.
+In tests, upload speed increases almost linearly with upload
+concurrency. For example to fill a gigabit pipe it may be necessary to
+raise this to 64. Note that this will use more memory.
+Note that chunks are stored in memory and there may be up to
+"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
+in memory.`,
+Default: 16,
+Advanced: true,
+}, {
Name: "list_chunk",
Help: `Size of blob list.
@@ -257,6 +276,7 @@ type Options struct {
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
+UploadConcurrency int `config:"upload_concurrency"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
ArchiveTierDelete bool `config:"archive_tier_delete"`
@@ -414,10 +434,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
if cs < minChunkSize {
-return errors.Errorf("%s is less than %s", cs, minChunkSize)
-}
-if cs > maxChunkSize {
-return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
+return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -459,11 +476,11 @@ const azureStorageEndpoint = "https://storage.azure.com/"
func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []byte) (azblob.TokenRefresher, error) {
var spCredentials servicePrincipalCredentials
if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
-return nil, errors.Wrap(err, "error parsing credentials from JSON file")
+return nil, fmt.Errorf("error parsing credentials from JSON file: %w", err)
}
oauthConfig, err := adal.NewOAuthConfig(azureActiveDirectoryEndpoint, spCredentials.Tenant)
if err != nil {
-return nil, errors.Wrap(err, "error creating oauth config")
+return nil, fmt.Errorf("error creating oauth config: %w", err)
}
// Create service principal token for Azure Storage.
@@ -473,7 +490,7 @@ func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []by
spCredentials.Password,
azureStorageEndpoint)
if err != nil {
-return nil, errors.Wrap(err, "error creating service principal token")
+return nil, fmt.Errorf("error creating service principal token: %w", err)
}
// Wrap token inside a refresher closure.
@@ -526,10 +543,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
-return nil, errors.Wrap(err, "azure: chunk size")
+return nil, fmt.Errorf("azure: chunk size: %w", err)
}
if opt.ListChunkSize > maxListChunkSize {
-return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
+return nil, fmt.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
}
if opt.Endpoint == "" {
opt.Endpoint = storageDefaultBaseURL
@@ -538,12 +555,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.AccessTier == "" {
opt.AccessTier = string(defaultAccessTier)
} else if !validateAccessTier(opt.AccessTier) {
-return nil, errors.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
+return nil, fmt.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}
if !validatePublicAccess((opt.PublicAccess)) {
-return nil, errors.Errorf("Azure Blob: Supported public access level are %s and %s",
+return nil, fmt.Errorf("Azure Blob: Supported public access level are %s and %s",
string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
}
@@ -585,17 +602,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case opt.UseEmulator:
credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
if err != nil {
-return nil, errors.Wrapf(err, "Failed to parse credentials")
+return nil, fmt.Errorf("Failed to parse credentials: %w", err)
}
u, err = url.Parse(emulatorBlobEndpoint)
if err != nil {
-return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
case opt.UseMSI:
var token adal.Token
-var userMSI *userMSI = &userMSI{}
+var userMSI = &userMSI{}
if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
// Validate and ensure exactly one is set. (To do: better validation.)
@@ -631,12 +648,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
})
if err != nil {
-return nil, errors.Wrapf(err, "Failed to acquire MSI token")
+return nil, fmt.Errorf("Failed to acquire MSI token: %w", err)
}
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
-return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
}
credential := azblob.NewTokenCredential(token.AccessToken, func(credential azblob.TokenCredential) time.Duration {
fs.Debugf(f, "Token refresher called.")
@@ -666,19 +683,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case opt.Account != "" && opt.Key != "":
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
if err != nil {
-return nil, errors.Wrapf(err, "Failed to parse credentials")
+return nil, fmt.Errorf("Failed to parse credentials: %w", err)
}
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
-return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
case opt.SASURL != "":
u, err = url.Parse(opt.SASURL)
if err != nil {
-return nil, errors.Wrapf(err, "failed to parse SAS URL")
+return nil, fmt.Errorf("failed to parse SAS URL: %w", err)
}
// use anonymous credentials in case of sas url
pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
@@ -698,17 +715,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Create a standard URL.
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
-return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
}
// Try loading service principal credentials from file.
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
if err != nil {
-return nil, errors.Wrap(err, "error opening service principal credentials file")
+return nil, fmt.Errorf("error opening service principal credentials file: %w", err)
}
// Create a token refresher from service principal credentials.
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, loadedCreds)
if err != nil {
-return nil, errors.Wrap(err, "failed to create a service principal token")
+return nil, fmt.Errorf("failed to create a service principal token: %w", err)
}
options := azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}
pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options)
@@ -1324,7 +1341,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
}
data, err := base64.StdEncoding.DecodeString(o.md5)
if err != nil {
-return "", errors.Wrapf(err, "Failed to decode Content-MD5: %q", o.md5)
+return "", fmt.Errorf("Failed to decode Content-MD5: %q: %w", o.md5, err)
}
return hex.EncodeToString(data), nil
}
@@ -1444,6 +1461,10 @@ func (o *Object) clearMetaData() {
// o.size
// o.md5
func (o *Object) readMetaData() (err error) {
+container, _ := o.split()
+if !o.fs.containerOK(container) {
+return fs.ErrorObjectNotFound
+}
if !o.modTime.IsZero() {
return nil
}
@@ -1510,7 +1531,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var offset int64
var count int64
if o.AccessTier() == azblob.AccessTierArchive {
-return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
+return nil, fmt.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
}
fs.FixRangeOption(options, o.size)
for _, option := range options {
@@ -1536,11 +1557,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
-return nil, errors.Wrap(err, "failed to open for download")
+return nil, fmt.Errorf("failed to open for download: %w", err)
}
err = o.decodeMetaDataFromDownloadResponse(downloadResponse)
if err != nil {
-return nil, errors.Wrap(err, "failed to decode metadata for download")
+return nil, fmt.Errorf("failed to decode metadata for download: %w", err)
}
in = downloadResponse.Body(azblob.RetryReaderOptions{})
return in, nil
@@ -1630,13 +1651,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "deleting archive tier blob before updating")
err = o.Remove(ctx)
if err != nil {
-return errors.Wrap(err, "failed to delete archive blob before updating")
+return fmt.Errorf("failed to delete archive blob before updating: %w", err)
}
} else {
return errCantUpdateArchiveTierBlobs
}
}
-container, _ := o.split()
+container, containerPath := o.split()
+if container == "" || containerPath == "" {
+return fmt.Errorf("can't upload to root - need a container")
+}
err = o.fs.makeContainer(ctx, container)
if err != nil {
return err
@@ -1667,10 +1691,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(o.fs.opt.ChunkSize),
-MaxBuffers: uploadConcurrency,
+MaxBuffers: o.fs.opt.UploadConcurrency,
Metadata: o.meta,
BlobHTTPHeaders: httpHeaders,
-TransferManager: o.fs.newPoolWrapper(uploadConcurrency),
+TransferManager: o.fs.newPoolWrapper(o.fs.opt.UploadConcurrency),
}
// Don't retry, return a retry error instead
@@ -1723,7 +1747,7 @@ func (o *Object) AccessTier() azblob.AccessTierType {
// SetTier performs changing object tier
func (o *Object) SetTier(tier string) error {
if !validateAccessTier(tier) {
-return errors.Errorf("Tier %s not supported by Azure Blob Storage", tier)
+return fmt.Errorf("Tier %s not supported by Azure Blob Storage", tier)
}
// Check if current tier already matches with desired tier
@@ -1739,7 +1763,7 @@ func (o *Object) SetTier(tier string) error {
})
if err != nil {
-return errors.Wrap(err, "Failed to set Blob Tier")
+return fmt.Errorf("Failed to set Blob Tier: %w", err)
}
// Set access tier on local object also, this typically
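
A rough way to picture the memory impact of the new `upload_concurrency` option described above: up to `--transfers` * `--azureblob-upload-concurrency` chunk buffers can be alive at once. A hypothetical helper (not backend code) for the worst case:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs"
)

// maxUploadMemory is the worst-case chunk buffer usage: each of the
// transfers may hold uploadConcurrency chunks in memory at the same time.
func maxUploadMemory(chunkSize fs.SizeSuffix, transfers, uploadConcurrency int) fs.SizeSuffix {
	return chunkSize * fs.SizeSuffix(transfers*uploadConcurrency)
}

func main() {
	// with the defaults above: 4 MiB chunks, --transfers 4, upload_concurrency 16
	fmt.Println(maxUploadMemory(4*fs.Mebi, 4, 16)) // roughly 256 MiB
}
```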

backend/azureblob/azureblob_test.go

@@ -17,12 +17,10 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
-RemoteName: "TestAzureBlob:",
-NilObject: (*Object)(nil),
-TiersToTest: []string{"Hot", "Cool"},
-ChunkedUpload: fstests.ChunkedUploadConfig{
-MaxChunkSize: maxChunkSize,
-},
+RemoteName: "TestAzureBlob:",
+NilObject: (*Object)(nil),
+TiersToTest: []string{"Hot", "Cool"},
+ChunkedUpload: fstests.ChunkedUploadConfig{},
})
}

backend/azureblob/imds.go

@@ -13,7 +13,6 @@ import (
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
-"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
@@ -95,7 +94,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
-return result, errors.Wrap(err, "MSI is not enabled on this VM")
+return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
@@ -120,7 +119,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
-return result, errors.Wrap(err, "Couldn't read IMDS response")
+return result, fmt.Errorf("Couldn't read IMDS response: %w", err)
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
@@ -131,7 +130,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
-return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
+return result, fmt.Errorf("Couldn't unmarshal IMDS response: %w", err)
}
return result, nil

backend/b2/b2.go

@@ -9,6 +9,7 @@ import (
"bytes"
"context"
"crypto/sha1"
+"errors"
"fmt"
gohash "hash"
"io"
@@ -19,7 +20,6 @@ import (
"sync"
"time"
-"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -160,7 +160,15 @@ free egress for data downloaded through the Cloudflare network.
Rclone works with private buckets by sending an "Authorization" header.
If the custom endpoint rewrites the requests for authentication,
e.g., in Cloudflare Workers, this header needs to be handled properly.
-Leave blank if you want to use the endpoint provided by Backblaze.`,
+Leave blank if you want to use the endpoint provided by Backblaze.
+The URL provided here SHOULD have the protocol and SHOULD NOT have
+a trailing slash or specify the /file/bucket subpath as rclone will
+request files with "{download_url}/file/{bucket_name}/{path}".
+Example:
+> https://mysubdomain.mydomain.tld
+(No trailing "/", "file" or "bucket")`,
Advanced: true,
}, {
Name: "download_auth_duration",
@@ -366,7 +374,7 @@ func errorHandler(resp *http.Response) error {
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
-return errors.Errorf("%s is less than %s", cs, minChunkSize)
+return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -381,7 +389,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
if cs < opt.ChunkSize {
-return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
+return fmt.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
}
return nil
}
@@ -414,11 +422,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = checkUploadCutoff(opt, opt.UploadCutoff)
if err != nil {
-return nil, errors.Wrap(err, "b2: upload cutoff")
+return nil, fmt.Errorf("b2: upload cutoff: %w", err)
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
-return nil, errors.Wrap(err, "b2: chunk size")
+return nil, fmt.Errorf("b2: chunk size: %w", err)
}
if opt.Account == "" {
return nil, errors.New("account not found")
@@ -463,7 +471,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = f.authorizeAccount(ctx)
if err != nil {
-return nil, errors.Wrap(err, "failed to authorize account")
+return nil, fmt.Errorf("failed to authorize account: %w", err)
}
// If this is a key limited to a single bucket, it must exist already
if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
@@ -472,7 +480,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
if allowedBucket != f.rootBucket {
-return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
+return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
}
f.cache.MarkOK(f.rootBucket)
f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
@@ -512,7 +520,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
return f.shouldRetryNoReauth(ctx, resp, err)
})
if err != nil {
-return errors.Wrap(err, "failed to authenticate")
+return fmt.Errorf("failed to authenticate: %w", err)
}
f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
return nil
@@ -558,7 +566,7 @@ func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUp
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
-return nil, errors.Wrap(err, "failed to get upload URL")
+return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
return upload, nil
}
@@ -1048,7 +1056,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
}
}
}
-return errors.Wrap(err, "failed to create bucket")
+return fmt.Errorf("failed to create bucket: %w", err)
}
f.setBucketID(bucket, response.ID)
f.setBucketType(bucket, response.Type)
@@ -1083,7 +1091,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
-return errors.Wrap(err, "failed to delete bucket")
+return fmt.Errorf("failed to delete bucket: %w", err)
}
f.clearBucketID(bucket)
f.clearBucketType(bucket)
@@ -1124,7 +1132,7 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
return nil
}
}
-return errors.Wrapf(err, "failed to hide %q", bucketPath)
+return fmt.Errorf("failed to hide %q: %w", bucketPath, err)
}
return nil
}
@@ -1145,7 +1153,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
-return errors.Wrapf(err, "failed to delete %q", Name)
+return fmt.Errorf("failed to delete %q: %w", Name, err)
}
return nil
}
@@ -1364,7 +1372,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
-return "", errors.Wrap(err, "failed to get download authorization")
+return "", fmt.Errorf("failed to get download authorization: %w", err)
}
return response.AuthorizationToken, nil
}
@@ -1669,14 +1677,14 @@ func (file *openFile) Close() (err error) {
// Check to see we read the correct number of bytes
if file.o.Size() != file.bytes {
-return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
+return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
}
// Check the SHA1
receivedSHA1 := file.o.sha1
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
-return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
+return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
}
return nil
@@ -1716,7 +1724,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
return nil, nil, fs.ErrorObjectNotFound
}
-return nil, nil, errors.Wrapf(err, "failed to %s for download", method)
+return nil, nil, fmt.Errorf("failed to %s for download: %w", method, err)
}
// NB resp may be Open here - don't return err != nil without closing

backend/b2/upload.go

@@ -15,7 +15,6 @@ import (
"strings"
"sync"
-"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -102,7 +101,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
parts++
}
if parts > maxParts {
-return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
+return nil, fmt.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
sha1SliceSize = parts
}
@@ -185,7 +184,7 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
-return nil, errors.Wrap(err, "failed to get upload URL")
+return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
@@ -406,7 +405,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
-return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
+return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
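
The `maxParts` guard above is plain ceiling division on the file size; a hypothetical standalone helper showing the arithmetic behind the "increase --b2-chunk-size" advice (B2 allows at most 10,000 parts per large file):

```go
package main

import "fmt"

// countParts returns how many chunkSize pieces size bytes splits into,
// the number the large upload code above checks against maxParts.
func countParts(size, chunkSize int64) int64 {
	parts := size / chunkSize
	if size%chunkSize != 0 {
		parts++
	}
	return parts
}

func main() {
	const mib = int64(1) << 20
	// a 1 TiB file with the default 96 MiB chunk size needs 10923 parts,
	// which is over the limit - hence the advice to raise --b2-chunk-size
	fmt.Println(countParts(int64(1)<<40, 96*mib))
}
```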

backend/box/box.go

@@ -14,6 +14,7 @@ import (
"crypto/rsa"
"encoding/json"
"encoding/pem"
+"errors"
"fmt"
"io"
"io/ioutil"
@@ -26,13 +27,6 @@ import (
"sync/atomic"
"time"
-"github.com/rclone/rclone/lib/encoder"
-"github.com/rclone/rclone/lib/env"
-"github.com/rclone/rclone/lib/jwtutil"
-"github.com/youmark/pkcs8"
-"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -43,9 +37,13 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
+"github.com/rclone/rclone/lib/encoder"
+"github.com/rclone/rclone/lib/env"
+"github.com/rclone/rclone/lib/jwtutil"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
+"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
)
@@ -93,7 +91,7 @@ func init() {
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
-return nil, errors.Wrap(err, "failed to configure token with jwt authentication")
+return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
@@ -167,15 +165,15 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
-return errors.Wrap(err, "get box config")
+return fmt.Errorf("get box config: %w", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
-return errors.Wrap(err, "get decrypted private key")
+return fmt.Errorf("get decrypted private key: %w", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
-return errors.Wrap(err, "get claims")
+return fmt.Errorf("get claims: %w", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
@@ -187,11 +185,11 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
-return nil, errors.Wrap(err, "box: failed to read Box config")
+return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}
err = json.Unmarshal(file, &boxConfig)
if err != nil {
-return nil, errors.Wrap(err, "box: failed to parse Box config")
+return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
}
return boxConfig, nil
}
@@ -199,7 +197,7 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
-return nil, errors.Wrap(err, "box: failed to generate random string for jti")
+return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
}
claims = &jws.ClaimSet{
@@ -240,12 +238,12 @@ func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if len(rest) > 0 {
-return nil, errors.Wrap(err, "box: extra data included in private key")
+return nil, fmt.Errorf("box: extra data included in private key: %w", err)
}
rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
if err != nil {
-return nil, errors.Wrap(err, "box: failed to decrypt private key")
+return nil, fmt.Errorf("box: failed to decrypt private key: %w", err)
}
return rsaKey.(*rsa.PrivateKey), nil
@@ -403,7 +401,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
if opt.UploadCutoff < minUploadCutoff {
-return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
+return nil, fmt.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
}
root = parsePath(root)
@@ -414,7 +412,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.AccessToken == "" {
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
-return nil, errors.Wrap(err, "failed to configure Box")
+return nil, fmt.Errorf("failed to configure Box: %w", err)
}
}
@@ -613,7 +611,7 @@ OUTER:
return shouldRetry(ctx, resp, err)
})
if err != nil {
-return found, errors.Wrap(err, "couldn't list files")
+return found, fmt.Errorf("couldn't list files: %w", err)
}
for i := range result.Entries {
item := &result.Entries[i]
@@ -740,14 +738,14 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
var conflict api.PreUploadCheckConflict
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
if err != nil {
-return "", errors.Wrap(err, "pre-upload check: JSON decode failed")
+return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
}
if conflict.Conflicts.Type != api.ItemTypeFile {
-return "", errors.Wrap(err, "pre-upload check: can't overwrite non file with file")
+return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
}
return conflict.Conflicts.ID, nil
}
-return "", errors.Wrap(err, "pre-upload check")
+return "", fmt.Errorf("pre-upload check: %w", err)
}
return "", nil
}
@@ -856,7 +854,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return shouldRetry(ctx, resp, err)
})
if err != nil {
-return errors.Wrap(err, "rmdir failed")
+return fmt.Errorf("rmdir failed: %w", err)
}
f.dirCache.FlushDir(dir)
if err != nil {
@@ -900,7 +898,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
-return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
+return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
// Create temporary object
@@ -984,7 +982,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
-return nil, errors.Wrap(err, "failed to read user info")
+return nil, fmt.Errorf("failed to read user info: %w", err)
}
// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
@@ -1145,7 +1143,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
})
wg.Wait()
if deleteErrors != 0 {
-return errors.Errorf("failed to delete %d trash items", deleteErrors)
+return fmt.Errorf("failed to delete %d trash items", deleteErrors)
}
return err
}
@@ -1205,7 +1203,7 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
return fs.ErrorIsDir
}
if info.Type != api.ItemTypeFile {
-return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
+return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
}
o.hasMetaData = true
o.size = int64(info.Size)
@@ -1341,7 +1339,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
return err
}
if result.TotalCount != 1 || len(result.Entries) != 1 {
-return errors.Errorf("failed to upload %v - not sure why", o)
+return fmt.Errorf("failed to upload %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}

backend/box/upload.go

@@ -8,6 +8,7 @@ import (
"crypto/sha1"
"encoding/base64"
"encoding/json"
+"errors"
"fmt"
"io"
"net/http"
@@ -15,7 +16,6 @@ import (
"sync"
"time"
-"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -140,7 +140,7 @@ outer:
}
}
default:
-return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
+return nil, fmt.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
}
}
fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
@@ -151,7 +151,7 @@ outer:
}
err = json.Unmarshal(body, &result)
if err != nil {
-return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
+return nil, fmt.Errorf("couldn't decode commit response: %q: %w", body, err)
}
return result, nil
}
@@ -177,7 +177,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
if err != nil {
-return errors.Wrap(err, "multipart upload create session failed")
+return fmt.Errorf("multipart upload create session failed: %w", err)
}
chunkSize := session.PartSize
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
@@ -222,7 +222,7 @@ outer:
// Read the chunk
_, err = io.ReadFull(in, buf)
if err != nil {
-err = errors.Wrap(err, "multipart upload failed to read source")
+err = fmt.Errorf("multipart upload failed to read source: %w", err)
break outer
}
@@ -238,7 +238,7 @@ outer:
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
if err != nil {
-err = errors.Wrap(err, "multipart upload failed to upload part")
+err = fmt.Errorf("multipart upload failed to upload part: %w", err)
select {
case errs <- err:
default:
@@ -266,11 +266,11 @@ outer:
// Finalise the upload session
result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
-return errors.Wrap(err, "multipart upload failed to finalize")
+return fmt.Errorf("multipart upload failed to finalize: %w", err)
}
if result.TotalCount != 1 || len(result.Entries) != 1 {
-return errors.Errorf("multipart upload failed %v - not sure why", o)
+return fmt.Errorf("multipart upload failed %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}
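
The chunk loop above relies on `io.ReadFull` semantics: a full buffer on success, `io.ErrUnexpectedEOF` on a short final chunk. A standalone sketch of that read-and-upload pattern (not the Box API itself):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// readChunks feeds r to upload in chunkSize pieces. io.ReadFull fills the
// buffer completely except possibly on the last chunk, mirroring the
// multipart loop above.
func readChunks(r io.Reader, chunkSize int, upload func(part int, b []byte) error) error {
	buf := make([]byte, chunkSize)
	for part := 0; ; part++ {
		n, err := io.ReadFull(r, buf)
		if err == io.EOF {
			return nil // source exhausted exactly on a chunk boundary
		}
		if err != nil && err != io.ErrUnexpectedEOF {
			return fmt.Errorf("multipart upload failed to read source: %w", err)
		}
		if uerr := upload(part, buf[:n]); uerr != nil {
			return uerr
		}
		if err == io.ErrUnexpectedEOF {
			return nil // short final chunk
		}
	}
}

func main() {
	src := bytes.NewBufferString("hello box, this is chunked data")
	_ = readChunks(src, 8, func(part int, b []byte) error {
		fmt.Printf("part %d: %q\n", part, b)
		return nil
	})
}
```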

backend/cache/cache.go

@@ -5,6 +5,7 @@ package cache
import (
"context"
+"errors"
"fmt"
"io"
"math"
@@ -19,7 +20,6 @@ import (
"syscall"
"time"
-"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
@@ -356,7 +356,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
return nil, err
}
if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
-return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
+return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
}
@@ -366,13 +366,13 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
rpath, err := parseRootPath(rootPath)
if err != nil {
-return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
+return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
}
remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
wrappedFs, wrapErr := cache.Get(ctx, remotePath)
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
-return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
+return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
}
var fsErr error
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -394,14 +394,18 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
notifiedRemotes: make(map[string]bool),
}
cache.PinUntilFinalized(f.Fs, f)
-f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
+rps := rate.Inf
+if opt.Rps > 0 {
+rps = rate.Limit(float64(opt.Rps))
+}
+f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers)
f.plexConnector = &plexConnector{}
if opt.PlexURL != "" {
if opt.PlexToken != "" {
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
if err != nil {
-return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
+return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
} else {
if opt.PlexPassword != "" && opt.PlexUsername != "" {
@@ -413,7 +417,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
m.Set("plex_token", token)
})
if err != nil {
-return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
+return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
}
}
@@ -434,11 +438,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
}
err = os.MkdirAll(dbPath, os.ModePerm)
if err != nil {
-return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
+return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
}
err = os.MkdirAll(chunkPath, os.ModePerm)
if err != nil {
-return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
+return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
}
dbPath = filepath.Join(dbPath, name+".db")
@@ -450,7 +454,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
DbWaitTime: time.Duration(opt.DbWaitTime),
})
if err != nil {
-return nil, errors.Wrapf(err, "failed to start cache db")
+return nil, fmt.Errorf("failed to start cache db: %w", err)
}
// Trap SIGINT and SIGTERM to close the DB handle gracefully
c := make(chan os.Signal, 1)
@@ -484,12 +488,12 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if f.opt.TempWritePath != "" {
err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
if err != nil {
-return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
+return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
}
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
if err != nil {
-return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
+return nil, fmt.Errorf("failed to create temp fs: %w", err)
}
fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
@@ -606,7 +610,7 @@ func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err er
out = make(rc.Params)
m, err := f.Stats()
if err != nil {
-return out, errors.Errorf("error while getting cache stats")
+return out, fmt.Errorf("error while getting cache stats")
}
out["status"] = "ok"
out["stats"] = m
@@ -633,7 +637,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
out = make(rc.Params)
remoteInt, ok := in["remote"]
if !ok {
-return out, errors.Errorf("remote is needed")
+return out, fmt.Errorf("remote is needed")
}
remote := remoteInt.(string)
withData := false
@@ -644,7 +648,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
remote = f.unwrapRemote(remote)
if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
-return out, errors.Errorf("%s doesn't exist in cache", remote)
+return out, fmt.Errorf("%s doesn't exist in cache", remote)
}
co := NewObject(f, remote)
@@ -653,7 +657,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
cd := NewDirectory(f, remote)
err := f.cache.ExpireDir(cd)
if err != nil {
-return out, errors.WithMessage(err, "error expiring directory")
+return out, fmt.Errorf("error expiring directory: %w", err)
}
// notify vfs too
f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
@@ -664,7 +668,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
// expire the entry
err = f.cache.ExpireObject(co, withData)
if err != nil {
-return out, errors.WithMessage(err, "error expiring file")
+return out, fmt.Errorf("error expiring file: %w", err)
}
// notify vfs too
f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
@@ -685,24 +689,24 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
case 1:
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
-return nil, errors.Errorf("invalid range: %q", part)
+return nil, fmt.Errorf("invalid range: %q", part)
}
end = start + 1
case 2:
if ints[0] != "" {
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
-return nil, errors.Errorf("invalid range: %q", part)
+return nil, fmt.Errorf("invalid range: %q", part)
}
}
if ints[1] != "" {
end, err = strconv.ParseInt(ints[1], 10, 64)
if err != nil {
-return nil, errors.Errorf("invalid range: %q", part)
+return nil, fmt.Errorf("invalid range: %q", part)
}
}
default:
-return nil, errors.Errorf("invalid range: %q", part)
+return nil, fmt.Errorf("invalid range: %q", part)
}
crs = append(crs, chunkRange{start: start, end: end})
}
@@ -757,18 +761,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
delete(in, "chunks")
crs, err := parseChunks(s)
if err != nil {
-return nil, errors.Wrap(err, "invalid chunks parameter")
+return nil, fmt.Errorf("invalid chunks parameter: %w", err)
}
var files [][2]string
for k, v := range in {
if !strings.HasPrefix(k, "file") {
-return nil, errors.Errorf("invalid parameter %s=%s", k, v)
+return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
}
switch v := v.(type) {
case string:
files = append(files, [2]string{v, f.unwrapRemote(v)})
default:
-return nil, errors.Errorf("invalid parameter %s=%s", k, v)
+return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
}
}
type fileStatus struct {
@@ -1124,7 +1128,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
case fs.Directory:
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
default:
-return errors.Errorf("Unknown object type %T", entry)
+return fmt.Errorf("Unknown object type %T", entry)
}
}
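
The rate limiter fix above avoids handing a non-positive rps straight to `rate.NewLimiter`; `rate.Inf` is the documented way to disable limiting in `golang.org/x/time/rate`. Sketched standalone:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	rps := -1 // the cache backend treats a non-positive rps as "no limit"
	limit := rate.Inf
	if rps > 0 {
		limit = rate.Limit(float64(rps))
	}
	limiter := rate.NewLimiter(limit, 8) // burst sized like TotalWorkers
	// with rate.Inf, Wait returns immediately rather than throttling
	if err := limiter.Wait(context.Background()); err != nil {
		fmt.Println("unexpected:", err)
	}
	fmt.Println("limit:", limiter.Limit())
}
```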

backend/cache/cache_internal_test.go

@@ -7,6 +7,7 @@ import (
"bytes"
"context"
"encoding/base64"
+"errors"
goflag "flag"
"fmt"
"io"
@@ -22,7 +23,6 @@ import (
"testing"
"time"
-"github.com/pkg/errors"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
@@ -446,7 +446,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
return err
}
if coSize != expectedSize {
-return errors.Errorf("%v <> %v", coSize, expectedSize)
+return fmt.Errorf("%v <> %v", coSize, expectedSize)
}
return nil
}, 12, time.Second*10)
@@ -502,7 +502,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 2 {
log.Printf("not expected listing /test: %v", li)
-return errors.Errorf("not expected listing /test: %v", li)
+return fmt.Errorf("not expected listing /test: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/one")
@@ -512,7 +512,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 0 {
log.Printf("not expected listing /test/one: %v", li)
-return errors.Errorf("not expected listing /test/one: %v", li)
+return fmt.Errorf("not expected listing /test/one: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/second")
@@ -522,21 +522,21 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 1 {
log.Printf("not expected listing /test/second: %v", li)
-return errors.Errorf("not expected listing /test/second: %v", li)
+return fmt.Errorf("not expected listing /test/second: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "data.bin" {
log.Printf("not expected name: %v", fi.Name())
-return errors.Errorf("not expected name: %v", fi.Name())
+return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/second/data.bin" {
log.Printf("not expected remote: %v", di.Remote())
-return errors.Errorf("not expected remote: %v", di.Remote())
+return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
-return errors.Errorf("unexpected listing: %v", li)
+return fmt.Errorf("unexpected listing: %v", li)
}
log.Printf("complete listing: %v", li)
@@ -591,17 +591,17 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
if !found {
log.Printf("not found /test")
-return errors.Errorf("not found /test")
+return fmt.Errorf("not found /test")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
if !found {
log.Printf("not found /test/one")
-return errors.Errorf("not found /test/one")
+return fmt.Errorf("not found /test/one")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
if !found {
log.Printf("not found /test/one/test2")
-return errors.Errorf("not found /test/one/test2")
+return fmt.Errorf("not found /test/one/test2")
}
li, err := runInstance.list(t, rootFs, "test/one")
if err != nil {
@@ -610,21 +610,21 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
}
if len(li) != 1 {
log.Printf("not expected listing /test/one: %v", li)
-return errors.Errorf("not expected listing /test/one: %v", li)
+return fmt.Errorf("not expected listing /test/one: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "test2" {
log.Printf("not expected name: %v", fi.Name())
-return errors.Errorf("not expected name: %v", fi.Name())
+return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/one/test2" {
log.Printf("not expected remote: %v", di.Remote())
-return errors.Errorf("not expected remote: %v", di.Remote())
+return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
-return errors.Errorf("unexpected listing: %v", li)
+return fmt.Errorf("unexpected listing: %v", li)
}
log.Printf("complete listing /test/one/test2")
return nil
@@ -1062,7 +1062,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
if !noLengthCheck && size != int64(len(checkSample)) {
-return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
+return checkSample, fmt.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
}
return checkSample, nil
}
@@ -1257,7 +1257,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
case state = <-buCh:
// continue
case <-time.After(maxDuration):
-waitCh <- errors.Errorf("Timed out waiting for background upload: %v", remote)
+waitCh <- fmt.Errorf("Timed out waiting for background upload: %v", remote)
return
}
checkRemote := state.Remote
@@ -1274,7 +1274,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
return
}
}
-waitCh <- errors.Errorf("Too many attempts to wait for the background upload: %v", remote)
+waitCh <- fmt.Errorf("Too many attempts to wait for the background upload: %v", remote)
}()
return waitCh
}

backend/cache/handle.go

@@ -5,6 +5,7 @@ package cache
import (
"context"
+"errors"
"fmt"
"io"
"path"
@@ -13,7 +14,6 @@ import (
"sync"
"time"
-"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
)
@@ -243,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
-return nil, errors.Errorf("chunk not found %v", chunkStart)
+return nil, fmt.Errorf("chunk not found %v", chunkStart)
}
// first chunk will be aligned with the start
@@ -323,7 +323,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
r.offset = r.cachedObject.Size() + offset
default:
-err = errors.Errorf("cache: unimplemented seek whence %v", whence)
+err = fmt.Errorf("cache: unimplemented seek whence %v", whence)
}
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))


@@ -5,12 +5,12 @@ package cache
import (
"context"
"fmt"
"io"
"path"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
@@ -178,10 +178,14 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
}
if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
if err != nil {
err = fmt.Errorf("in parent fs %v: %w", o.ParentFs, err)
}
} else {
liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
if err != nil {
err = fmt.Errorf("in cache fs %v: %w", o.CacheFs.Fs, err)
}
}
if err != nil {
fs.Errorf(o, "error refreshing object in : %v", err)
@@ -253,7 +257,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return errors.Errorf("%v is currently uploading, can't update", o)
return fmt.Errorf("%v is currently uploading, can't update", o)
}
}
fs.Debugf(o, "updating object contents with size %v", src.Size())
@@ -292,7 +296,7 @@ func (o *Object) Remove(ctx context.Context) error {
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return errors.Errorf("%v is currently uploading, can't delete", o)
return fmt.Errorf("%v is currently uploading, can't delete", o)
}
}
err := o.Object.Remove(ctx)


@@ -4,12 +4,12 @@
package cache
import (
"fmt"
"strconv"
"strings"
"time"
cache "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -53,7 +53,7 @@ func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
return data, nil
}
return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
return nil, fmt.Errorf("couldn't get cached object data at offset %v", offset)
}
// AddChunk adds a new chunk of a cached object


@@ -17,7 +17,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
@@ -120,11 +119,11 @@ func (b *Persistent) connect() error {
err = os.MkdirAll(b.dataPath, os.ModePerm)
if err != nil {
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err)
}
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
if err != nil {
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err)
}
if b.features.PurgeDb {
b.Purge()
@@ -176,7 +175,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(remote, false, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", remote)
return fmt.Errorf("couldn't open bucket (%v)", remote)
}
data := bucket.Get([]byte("."))
@@ -184,7 +183,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
return json.Unmarshal(data, cd)
}
return errors.Errorf("%v not found", remote)
return fmt.Errorf("%v not found", remote)
})
return cd, err
@@ -209,7 +208,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
}
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
}
for _, cachedDir := range cachedDirs {
@@ -226,7 +225,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
encoded, err := json.Marshal(cachedDir)
if err != nil {
return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
}
err = b.Put([]byte("."), encoded)
if err != nil {
@@ -244,17 +243,17 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedDir.abs(), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs())
}
val := bucket.Get([]byte("."))
if val != nil {
err := json.Unmarshal(val, cachedDir)
if err != nil {
return errors.Errorf("error during unmarshalling obj: %v", err)
return fmt.Errorf("error during unmarshalling obj: %v", err)
}
} else {
return errors.Errorf("missing cached dir: %v", cachedDir)
return fmt.Errorf("missing cached dir: %v", cachedDir)
}
c := bucket.Cursor()
@@ -269,7 +268,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
// we try to find a cached meta for the dir
currentBucket := c.Bucket().Bucket(k)
if currentBucket == nil {
return errors.Errorf("couldn't open bucket (%v)", string(k))
return fmt.Errorf("couldn't open bucket (%v)", string(k))
}
metaKey := currentBucket.Get([]byte("."))
@@ -318,7 +317,7 @@ func (b *Persistent) RemoveDir(fp string) error {
err = b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", fp)
return fmt.Errorf("couldn't open bucket (%v)", fp)
}
// delete the cached dir
err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
@@ -378,13 +377,13 @@ func (b *Persistent) GetObject(cachedObject *Object) (err error) {
return b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
}
val := bucket.Get([]byte(cachedObject.Name))
if val != nil {
return json.Unmarshal(val, cachedObject)
}
return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
return fmt.Errorf("couldn't find object (%v)", cachedObject.Name)
})
}
@@ -393,16 +392,16 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, true, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject)
}
// cache Object Info
encoded, err := json.Marshal(cachedObject)
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
}
err = bucket.Put([]byte(cachedObject.Name), encoded)
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
}
return nil
})
@@ -414,7 +413,7 @@ func (b *Persistent) RemoveObject(fp string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
}
err := bucket.Delete([]byte(cleanPath(objName)))
if err != nil {
@@ -446,7 +445,7 @@ func (b *Persistent) HasEntry(remote string) bool {
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(dir, false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", remote)
return fmt.Errorf("couldn't open parent bucket for %v", remote)
}
if f := bucket.Bucket([]byte(name)); f != nil {
return nil
@@ -455,7 +454,7 @@ func (b *Persistent) HasEntry(remote string) bool {
return nil
}
return errors.Errorf("couldn't find object (%v)", remote)
return fmt.Errorf("couldn't find object (%v)", remote)
})
if err == nil {
return true
@@ -555,7 +554,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
err := b.db.Update(func(tx *bolt.Tx) error {
dataTsBucket := tx.Bucket([]byte(DataTsBucket))
if dataTsBucket == nil {
return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
return fmt.Errorf("Couldn't open (%v) bucket", DataTsBucket)
}
// iterate through ts
c := dataTsBucket.Cursor()
@@ -733,7 +732,7 @@ func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
return nil
}
}
return errors.Errorf("not found %v-%v", path, offset)
return fmt.Errorf("not found %v-%v", path, offset)
})
return t, err
@@ -773,7 +772,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
tempObj := &tempUploadInfo{
DestPath: destPath,
@@ -784,11 +783,11 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
// cache Object Info
encoded, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
}
err = bucket.Put([]byte(destPath), encoded)
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
return nil
@@ -803,7 +802,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
err = b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
c := bucket.Cursor()
@@ -836,7 +835,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
return nil
}
return errors.Errorf("no pending upload found")
return fmt.Errorf("no pending upload found")
})
return destPath, err
@@ -847,14 +846,14 @@ func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(tempBucket))
if bucket == nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return errors.Errorf("pending upload (%v) not found %v", remote, err)
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
started = tempObj.Started
@@ -869,7 +868,7 @@ func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, e
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(tempBucket))
if bucket == nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
c := bucket.Cursor()
@@ -899,22 +898,22 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return errors.Errorf("pending upload (%v) not found %v", remote, err)
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
tempObj.Started = false
v2, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated %v", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated %v", err)
}
return nil
})
@@ -927,7 +926,7 @@ func (b *Persistent) removePendingUpload(remote string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
return bucket.Delete([]byte(remote))
})
@@ -942,17 +941,17 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return errors.Errorf("pending upload (%v) not found %v", remote, err)
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
if tempObj.Started {
return errors.Errorf("pending upload already started %v", remote)
return fmt.Errorf("pending upload already started %v", remote)
}
err = fn(tempObj)
if err != nil {
@@ -970,11 +969,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
}
v2, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated %v", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated %v", err)
}
return nil
@@ -1015,11 +1014,11 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
// cache Object Info
encoded, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
}
err = bucket.Put([]byte(destPath), encoded)
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
}


@@ -8,6 +8,7 @@ import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
gohash "hash"
"io"
@@ -21,7 +22,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -290,13 +290,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
baseName, basePath, err := fspath.SplitFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
}
// Look for a file first
remotePath := fspath.JoinRootPath(basePath, rpath)
baseFs, err := cache.Get(ctx, baseName+remotePath)
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", baseName+remotePath, err)
}
if !operations.CanServerSideMove(baseFs) {
return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy")
@@ -386,7 +386,7 @@ type Fs struct {
// configure must be called only from NewFs or by unit tests.
func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error {
if err := f.setChunkNameFormat(nameFormat); err != nil {
return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
return fmt.Errorf("invalid name format '%s': %w", nameFormat, err)
}
if err := f.setMetaFormat(metaFormat); err != nil {
return err
@@ -878,7 +878,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// ignores non-chunked objects and skips chunk size checks.
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
if err := f.forbidChunk(false, remote); err != nil {
return nil, errors.Wrap(err, "can't access")
return nil, fmt.Errorf("can't access: %w", err)
}
var (
@@ -927,7 +927,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
case fs.ErrorDirNotFound:
entries = nil
default:
return nil, errors.Wrap(err, "can't detect composite file")
return nil, fmt.Errorf("can't detect composite file: %w", err)
}
if f.useNoRename {
@@ -1067,7 +1067,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
case ErrMetaTooBig, ErrMetaUnknown:
return err // return these errors unwrapped for unit tests
default:
return errors.Wrap(err, "invalid metadata")
return fmt.Errorf("invalid metadata: %w", err)
}
if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
return errors.New("metadata doesn't match file size")
@@ -1132,7 +1132,7 @@ func (f *Fs) put(
// Perform consistency checks
if err := f.forbidChunk(src, remote); err != nil {
return nil, errors.Wrap(err, action+" refused")
return nil, fmt.Errorf("%s refused: %w", action, err)
}
if target == nil {
// Get target object with a quick directory scan
@@ -1146,7 +1146,7 @@ func (f *Fs) put(
obj := target.(*Object)
if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
// refuse to update a file of unsupported format
return nil, errors.Wrap(err, "refusing to "+action)
return nil, fmt.Errorf("refusing to %s: %w", action, err)
}
}
@@ -1564,7 +1564,7 @@ func (f *Fs) Hashes() hash.Set {
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
if err := f.forbidChunk(dir, dir); err != nil {
return errors.Wrap(err, "can't mkdir")
return fmt.Errorf("can't mkdir: %w", err)
}
return f.base.Mkdir(ctx, dir)
}
@@ -1633,7 +1633,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
// operations.Move can still call Remove if chunker's Move refuses
// to corrupt file in hard mode. Hence, refuse to Remove, too.
return errors.Wrap(err, "refuse to corrupt")
return fmt.Errorf("refuse to corrupt: %w", err)
}
if err := o.readMetadata(ctx); err == ErrMetaUnknown {
// Proceed but warn user that unexpected things can happen.
@@ -1661,12 +1661,12 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// copyOrMove implements copy or move
func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMoveFn, md5, sha1, opName string) (fs.Object, error) {
if err := f.forbidChunk(o, remote); err != nil {
return nil, errors.Wrapf(err, "can't %s", opName)
return nil, fmt.Errorf("can't %s: %w", opName, err)
}
if err := o.readMetadata(ctx); err != nil {
// Refuse to copy/move composite files with invalid or future
// metadata format which might involve unsupported chunk types.
return nil, errors.Wrapf(err, "can't %s this file", opName)
return nil, fmt.Errorf("can't %s this file: %w", opName, err)
}
if !o.isComposite() {
fs.Debugf(o, "%s non-chunked object...", opName)
@@ -2163,7 +2163,7 @@ func (o *Object) UnWrap() fs.Object {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if err := o.readMetadata(ctx); err != nil {
// refuse to open unsupported format
return nil, errors.Wrap(err, "can't open")
return nil, fmt.Errorf("can't open: %w", err)
}
if !o.isComposite() {
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file

backend/combine/combine.go Normal file

@@ -0,0 +1,877 @@
// Package combine implements a backend to combine multiple remotes in a directory tree
package combine
/*
Have API to add/remove branches in the combine
*/
import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"golang.org/x/sync/errgroup"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "combine",
Description: "Combine several remotes into one",
NewFs: NewFs,
Options: []fs.Option{{
Name: "upstreams",
Help: `Upstreams for combining
These should be in the form
dir=remote:path dir2=remote2:path
Before the = is the root directory and after is the remote to put there.
Embedded spaces can be added using quotes
"dir=remote:path with space" "dir2=remote2:path with space"
`,
Required: true,
Default: fs.SpaceSepList(nil),
}},
}
fs.Register(fsi)
}
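
A hedged illustration of the upstreams format described in the help text above (the section and remote names here are invented): a combine remote in rclone.conf might look like

[combined]
type = combine
upstreams = documents=drive:docs "backup=s3:bucket/path with space"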
// Options defines the configuration for this backend
type Options struct {
Upstreams fs.SpaceSepList `config:"upstreams"`
}
// Fs represents a combination of upstreams
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
root string // the path we are working on
hashSet hash.Set // common hashes
when time.Time // directory times
upstreams map[string]*upstream // map of upstreams
}
// adjustment stores the info to add a prefix to a path or chop characters off it
type adjustment struct {
prefix string
chop int
}
// do makes the adjustment on s
func (a *adjustment) do(s string) string {
if a.prefix != "" {
return join(a.prefix, s)
}
return s[a.chop:]
}
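
A sketch of do in action, written as if inside the package with invented values: prefix re-roots upstream paths under the mount directory, while chop strips leading characters when the Fs root reaches inside an upstream.

// prefix case: upstream mounted at "dir1", f.root == ""
a := adjustment{prefix: "dir1"}
_ = a.do("file.txt") // "dir1/file.txt"

// chop case: the first 5 characters ("dir1/") are inside the root
b := adjustment{chop: 5}
_ = b.do("dir1/file.txt") // "file.txt"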
// upstream represents an upstream Fs
type upstream struct {
f fs.Fs
parent *Fs
dir string // directory the upstream is mounted
pathAdjustment adjustment // how to fiddle with the path
}
// Create an upstream from the directory it is mounted on and the remote
func (f *Fs) newUpstream(ctx context.Context, dir, remote string) (*upstream, error) {
uFs, err := cache.Get(ctx, remote)
if err == fs.ErrorIsFile {
return nil, fmt.Errorf("can't combine files yet, only directories %q: %w", remote, err)
}
if err != nil {
return nil, fmt.Errorf("failed to create upstream %q: %w", remote, err)
}
u := &upstream{
f: uFs,
parent: f,
dir: dir,
}
if len(f.root) < len(dir) {
u.pathAdjustment.prefix = dir[:len(dir)-len(f.root)]
} else {
u.pathAdjustment.chop = len(f.root) - len(dir)
}
return u, nil
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
// Parse config into Options struct
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Backward compatible to old config
if len(opt.Upstreams) == 0 {
return nil, errors.New("combine can't point to an empty upstream - check the value of the upstreams setting")
}
for _, u := range opt.Upstreams {
if strings.HasPrefix(u, name+":") {
return nil, errors.New("can't point combine remote at itself - check the value of the upstreams setting")
}
}
f := &Fs{
name: name,
root: root,
opt: *opt,
upstreams: make(map[string]*upstream, len(opt.Upstreams)),
when: time.Now(),
}
g, ctx := errgroup.WithContext(ctx)
var mu sync.Mutex
for _, upstream := range opt.Upstreams {
upstream := upstream
g.Go(func() (err error) {
equal := strings.IndexRune(upstream, '=')
if equal < 0 {
return fmt.Errorf("no \"=\" in upstream definition %q", upstream)
}
dir, remote := upstream[:equal], upstream[equal+1:]
if dir == "" {
return fmt.Errorf("empty dir in upstream definition %q", upstream)
}
if remote == "" {
return fmt.Errorf("empty remote in upstream definition %q", upstream)
}
u, err := f.newUpstream(ctx, dir, remote)
if err != nil {
return err
}
mu.Lock()
f.upstreams[dir] = u
mu.Unlock()
return nil
})
}
err = g.Wait()
if err != nil {
return nil, err
}
// check features
var features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
SetTier: true,
GetTier: true,
}).Fill(ctx, f)
canMove := true
for _, u := range f.upstreams {
features = features.Mask(ctx, u.f) // Mask all upstream fs
if !operations.CanServerSideMove(u.f) {
canMove = false
}
}
// We can move if all remotes support Move or Copy
if canMove {
features.Move = f.Move
}
// Enable ListR when every upstream either supports ListR or is local
// But not when all upstreams are local
if features.ListR == nil {
for _, u := range f.upstreams {
if u.f.Features().ListR != nil {
features.ListR = f.ListR
} else if !u.f.Features().IsLocal {
features.ListR = nil
break
}
}
}
// Enable Purge when any upstreams support it
if features.Purge == nil {
for _, u := range f.upstreams {
if u.f.Features().Purge != nil {
features.Purge = f.Purge
break
}
}
}
// Enable Shutdown when any upstreams support it
if features.Shutdown == nil {
for _, u := range f.upstreams {
if u.f.Features().Shutdown != nil {
features.Shutdown = f.Shutdown
break
}
}
}
// Enable DirCacheFlush when any upstreams support it
if features.DirCacheFlush == nil {
for _, u := range f.upstreams {
if u.f.Features().DirCacheFlush != nil {
features.DirCacheFlush = f.DirCacheFlush
break
}
}
}
f.features = features
// Get common intersection of hashes
var hashSet hash.Set
var first = true
for _, u := range f.upstreams {
if first {
hashSet = u.f.Hashes()
first = false
} else {
hashSet = hashSet.Overlap(u.f.Hashes())
}
}
f.hashSet = hashSet
// Check to see if the root is actually a file
if f.root != "" {
_, err := f.NewObject(ctx, "")
if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile || err == fs.ErrorIsDir {
// File doesn't exist or is a directory so return old f
return f, nil
}
return nil, err
}
// The root points to an existing file so adjust it to the parent directory
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)
f.root = newRoot
// Adjust path adjustment to remove leaf
for _, u := range f.upstreams {
u.pathAdjustment.chop -= len(leaf) + 1
}
return f, fs.ErrorIsFile
}
return f, nil
}
// Run a function over all the upstreams in parallel
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
g, gCtx := errgroup.WithContext(ctx)
for _, u := range f.upstreams {
u := u
g.Go(func() (err error) {
return fn(gCtx, u)
})
}
return g.Wait()
}
// join joins the elements like path.Join but returns an empty string instead of "."
func join(elem ...string) string {
result := path.Join(elem...)
if result == "." {
return ""
}
return result
}
// find the upstream for the remote passed in, returning the upstream and the adjusted path
func (f *Fs) findUpstream(remote string) (u *upstream, uRemote string, err error) {
// defer log.Trace(remote, "")("f=%v, uRemote=%q, err=%v", &u, &uRemote, &err)
absolute := join(f.root, remote)
for dir, u := range f.upstreams {
dirSlash := dir + "/"
foundStart := -1
foundEnd := -1
if absolute == dir {
foundEnd = len(dir)
foundStart = foundEnd
} else if strings.HasPrefix(absolute, dirSlash) {
foundEnd = len(dirSlash)
foundStart = foundEnd - 1
}
if foundStart > 0 {
uRemote = absolute[foundEnd:]
return u, uRemote, nil
}
}
return nil, "", fmt.Errorf("combine for remote %q: %w", remote, fs.ErrorDirNotFound)
}
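
A worked example of the matching logic, assuming upstreams mounted at "dir1" and "dir2" with f.root == "":

// f.findUpstream("dir1/a/b") -> (u1, "a/b", nil)  "dir1/a/b" starts with "dir1/"
// f.findUpstream("dir2")     -> (u2, "", nil)     exact match on the mount point
// f.findUpstream("other/x")  -> error wrapping fs.ErrorDirNotFound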
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("combine root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// The root always exists
if f.root == "" && dir == "" {
return nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
return u.f.Rmdir(ctx, uRemote)
}
// Hashes returns the hash types common to all the upstreams
func (f *Fs) Hashes() hash.Set {
return f.hashSet
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// The root always exists
if f.root == "" && dir == "" {
return nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
return u.f.Mkdir(ctx, uRemote)
}
// purge the upstream or fallback to a slow way
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
if do := u.f.Features().Purge; do != nil {
err = do(ctx, dir)
} else {
err = operations.Purge(ctx, u.f, dir)
}
return err
}
// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
if f.root == "" && dir == "" {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
return u.purge(ctx, "")
})
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
return u.purge(ctx, uRemote)
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
dstU, dstRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := dstU.f.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
o, err := do(ctx, srcObj.Object, dstRemote)
if err != nil {
return nil, err
}
return dstU.newObject(o), nil
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
dstU, dstRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := dstU.f.Features().Move
useCopy := false
if do == nil {
do = dstU.f.Features().Copy
if do == nil {
return nil, fs.ErrorCantMove
}
useCopy = true
}
o, err := do(ctx, srcObj.Object, dstRemote)
if err != nil {
return nil, err
}
// If did Copy then remove the source object
if useCopy {
err = srcObj.Remove(ctx)
if err != nil {
return nil, err
}
}
return dstU.newObject(o), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
// defer log.Trace(f, "src=%v, srcRemote=%q, dstRemote=%q", src, srcRemote, dstRemote)("err=%v", &err)
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(src, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
dstU, dstURemote, err := f.findUpstream(dstRemote)
if err != nil {
return err
}
srcU, srcURemote, err := srcFs.findUpstream(srcRemote)
if err != nil {
return err
}
do := dstU.f.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
fs.Logf(dstU.f, "srcU.f=%v, srcURemote=%q, dstURemote=%q", srcU.f, srcURemote, dstURemote)
return do(ctx, srcU.f, srcURemote, dstURemote)
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, fn func(string, fs.EntryType), ch <-chan time.Duration) {
var uChans []chan time.Duration
for _, u := range f.upstreams {
if do := u.f.Features().ChangeNotify; do != nil {
ch := make(chan time.Duration)
uChans = append(uChans, ch)
do(ctx, fn, ch)
}
}
go func() {
for i := range ch {
for _, c := range uChans {
c <- i
}
}
for _, c := range uChans {
close(c)
}
}()
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
ctx := context.Background()
_ = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().DirCacheFlush; do != nil {
do()
}
return nil
})
}
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
u, uRemote, err := f.findUpstream(srcPath)
if err != nil {
return nil, err
}
uSrc := operations.NewOverrideRemote(src, uRemote)
var o fs.Object
if stream {
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
} else {
o, err = u.f.Put(ctx, in, uSrc, options...)
}
if err != nil {
return nil, err
}
return u.newObject(o), nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, false, options...)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, true, options...)
default:
return nil, err
}
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
usage := &fs.Usage{
Total: new(int64),
Used: new(int64),
Trashed: new(int64),
Other: new(int64),
Free: new(int64),
Objects: new(int64),
}
for _, u := range f.upstreams {
doAbout := u.f.Features().About
if doAbout == nil {
continue
}
usg, err := doAbout(ctx)
if errors.Is(err, fs.ErrorDirNotFound) {
continue
}
if err != nil {
return nil, err
}
if usg.Total != nil && usage.Total != nil {
*usage.Total += *usg.Total
} else {
usage.Total = nil
}
if usg.Used != nil && usage.Used != nil {
*usage.Used += *usg.Used
} else {
usage.Used = nil
}
if usg.Trashed != nil && usage.Trashed != nil {
*usage.Trashed += *usg.Trashed
} else {
usage.Trashed = nil
}
if usg.Other != nil && usage.Other != nil {
*usage.Other += *usg.Other
} else {
usage.Other = nil
}
if usg.Free != nil && usage.Free != nil {
*usage.Free += *usg.Free
} else {
usage.Free = nil
}
if usg.Objects != nil && usage.Objects != nil {
*usage.Objects += *usg.Objects
} else {
usage.Objects = nil
}
}
return usage, nil
}
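
The aggregation rule above can be captured in a small in-package sketch (addField is a hypothetical helper, not in the diff): a usage field keeps accumulating only while every upstream reports it, and a single missing value makes the aggregate unknown (nil).

// addField adds part into *total, or poisons the total to nil (unknown)
// when either side is missing.
func addField(total **int64, part *int64) {
	if part != nil && *total != nil {
		**total += *part
	} else {
		*total = nil
	}
}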
// Wraps entries for this upstream
func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.DirEntries, error) {
for i, entry := range entries {
switch x := entry.(type) {
case fs.Object:
entries[i] = u.newObject(x)
case fs.Directory:
newDir := fs.NewDirCopy(ctx, x)
newDir.SetRemote(u.pathAdjustment.do(newDir.Remote()))
entries[i] = newDir
default:
return nil, fmt.Errorf("unknown entry type %T", entry)
}
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
if f.root == "" && dir == "" {
entries = make(fs.DirEntries, 0, len(f.upstreams))
for combineDir := range f.upstreams {
d := fs.NewDir(combineDir, f.when)
entries = append(entries, d)
}
return entries, nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return nil, err
}
entries, err = u.f.List(ctx, uRemote)
if err != nil {
return nil, err
}
return u.wrapEntries(ctx, entries)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
// defer log.Trace(f, "dir=%q, callback=%v", dir, callback)("err=%v", &err)
if f.root == "" && dir == "" {
rootEntries, err := f.List(ctx, "")
if err != nil {
return err
}
err = callback(rootEntries)
if err != nil {
return err
}
var mu sync.Mutex
syncCallback := func(entries fs.DirEntries) error {
mu.Lock()
defer mu.Unlock()
return callback(entries)
}
err = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
return f.ListR(ctx, u.dir, syncCallback)
})
if err != nil {
return err
}
return nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
wrapCallback := func(entries fs.DirEntries) error {
entries, err := u.wrapEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
}
if do := u.f.Features().ListR; do != nil {
err = do(ctx, uRemote, wrapCallback)
} else {
err = walk.ListR(ctx, u.f, uRemote, true, -1, walk.ListAll, wrapCallback)
}
if err == fs.ErrorDirNotFound {
err = nil
}
return err
}
// NewObject creates a new remote combine file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
o, err := u.f.NewObject(ctx, uRemote)
if err != nil {
return nil, err
}
return u.newObject(o), nil
}
// Precision is the greatest Precision of all upstreams
func (f *Fs) Precision() time.Duration {
var greatestPrecision time.Duration
for _, u := range f.upstreams {
uPrecision := u.f.Precision()
if uPrecision > greatestPrecision {
greatestPrecision = uPrecision
}
}
return greatestPrecision
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().Shutdown; do != nil {
return do(ctx)
}
return nil
})
}
// Object describes a wrapped Object
//
// This is a wrapped Object which knows its path prefix
type Object struct {
fs.Object
u *upstream
}
func (u *upstream) newObject(o fs.Object) *Object {
return &Object{
Object: o,
u: u,
}
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.u.parent
}
// String returns the remote path
func (o *Object) String() string {
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.u.pathAdjustment.do(o.Object.String())
}
// MimeType returns the content type of the Object if known
func (o *Object) MimeType(ctx context.Context) (mimeType string) {
if do, ok := o.Object.(fs.MimeTyper); ok {
mimeType = do.MimeType(ctx)
}
return mimeType
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
return o.Object
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
)


@@ -0,0 +1,79 @@
// Test Combine filesystem interface
package combine_test
import (
"testing"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/memory"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
func TestLocal(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := MakeTestDirs(t, 3)
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=" + dirs[2]
name := "TestCombineLocal"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
})
}
func TestMemory(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
upstreams := "dir1=:memory:dir1 dir2=:memory:dir2 dir3=:memory:dir3"
name := "TestCombineMemory"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
})
}
func TestMixed(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := MakeTestDirs(t, 2)
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=:memory:dir3"
name := "TestCombineMixed"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
})
}
// MakeTestDirs makes directories in /tmp for testing
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
for i := 1; i <= n; i++ {
dir := t.TempDir()
dirs = append(dirs, dir)
}
return dirs
}


@@ -10,6 +10,7 @@ import (
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -21,7 +22,6 @@ import (
"github.com/buengese/sgzip"
"github.com/gabriel-vasile/mimetype"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunkedreader"
@@ -143,7 +143,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
}
// Strip trailing slashes if they exist in rpath
@@ -158,7 +158,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
wrappedFs, err = wInfo.NewFs(ctx, wName, remotePath, wConfig)
}
if err != nil && err != fs.ErrorIsFile {
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
}
// Create the wrapping fs
@@ -304,7 +304,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
case fs.Directory:
f.addDir(&newEntries, x)
default:
return nil, errors.Errorf("Unknown object type %T", entry)
return nil, fmt.Errorf("Unknown object type %T", entry)
}
}
return newEntries, nil
@@ -401,6 +401,10 @@ func isCompressible(r io.Reader) (bool, error) {
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}
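
The added Close is what makes the ratio meaningful: gzip writers buffer compressed data internally, so reading b.Len() before Close undercounts the output. A standalone sketch using the standard library gzip (the code above uses sgzip, assumed here to buffer the same way):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

func main() {
	var b bytes.Buffer
	w := gzip.NewWriter(&b)
	_, _ = w.Write(bytes.Repeat([]byte("a"), 1000))
	before := b.Len() // still small: most data is buffered inside the writer
	_ = w.Close()     // flushes the remaining data and writes the gzip footer
	fmt.Println(before, b.Len())
}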
@@ -410,7 +414,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
srcHash := hasher.Sums()[ht]
dstHash, err := o.Hash(ctx, ht)
if err != nil {
return errors.Wrap(err, "failed to read destination hash")
return fmt.Errorf("failed to read destination hash: %w", err)
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
@@ -418,7 +422,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return errors.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
}
return nil
}
@@ -462,10 +466,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
_ = os.Remove(tempFile.Name())
}()
if err != nil {
return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
return nil, fmt.Errorf("Failed to create temporary local FS to spool file: %w", err)
}
if _, err = io.Copy(tempFile, in); err != nil {
return nil, errors.Wrap(err, "Failed to write temporary local file")
return nil, fmt.Errorf("Failed to write temporary local file: %w", err)
}
if _, err = tempFile.Seek(0, 0); err != nil {
return nil, err
@@ -626,9 +630,11 @@ func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.Objec
// Put the data
mo, err = put(ctx, metaReader, f.wrapInfo(src, makeMetadataName(src.Remote()), int64(len(data))), options...)
if err != nil {
removeErr := mo.Remove(ctx)
if removeErr != nil {
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
if mo != nil {
removeErr := mo.Remove(ctx)
if removeErr != nil {
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
}
}
return nil, err
}
@@ -714,7 +720,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
err = oldObj.(*Object).Object.Remove(ctx)
if err != nil {
return nil, errors.Wrap(err, "Could remove original object")
return nil, fmt.Errorf("Could remove original object: %w", err)
}
}
@@ -723,7 +729,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if compressible {
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
if err != nil {
return nil, errors.Wrap(err, "Couldn't rename streamed Object.")
return nil, fmt.Errorf("Couldn't rename streamed Object.: %w", err)
}
newObj.Object = wrapObj
}


@@ -7,6 +7,8 @@ import (
gocipher "crypto/cipher"
"crypto/rand"
"encoding/base32"
"encoding/base64"
"errors"
"fmt"
"io"
"strconv"
@@ -15,7 +17,7 @@ import (
"time"
"unicode/utf8"
"github.com/pkg/errors"
"github.com/Max-Sum/base32768"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -94,12 +96,12 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
case "obfuscate":
mode = NameEncryptionObfuscated
default:
err = errors.Errorf("Unknown file name encryption mode %q", s)
err = fmt.Errorf("Unknown file name encryption mode %q", s)
}
return mode, err
}
// String turns mode into a human readable string
// String turns mode into a human-readable string
func (mode NameEncryptionMode) String() (out string) {
switch mode {
case NameEncryptionOff:
@@ -114,6 +116,57 @@ func (mode NameEncryptionMode) String() (out string) {
return out
}
// fileNameEncoding are the encoding methods dealing with encrypted file names
type fileNameEncoding interface {
EncodeToString(src []byte) string
DecodeString(s string) ([]byte, error)
}
// caseInsensitiveBase32Encoding defines a file name encoding
// using a modified version of standard base32 as described in
// RFC4648
//
// The standard encoding is modified in two ways
// * it becomes lower case (no-one likes upper case filenames!)
// * we strip the padding character `=`
type caseInsensitiveBase32Encoding struct{}
// EncodeToString encodes a string using the modified version of
// base32 encoding.
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
encoded := base32.HexEncoding.EncodeToString(src)
encoded = strings.TrimRight(encoded, "=")
return strings.ToLower(encoded)
}
// DecodeString decodes a string as encoded by EncodeToString
func (caseInsensitiveBase32Encoding) DecodeString(s string) ([]byte, error) {
if strings.HasSuffix(s, "=") {
return nil, ErrorBadBase32Encoding
}
// First figure out how many padding characters to add
roundUpToMultipleOf8 := (len(s) + 7) &^ 7
equals := roundUpToMultipleOf8 - len(s)
s = strings.ToUpper(s) + "========"[:equals]
return base32.HexEncoding.DecodeString(s)
}
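
A worked example of the padding arithmetic for a 5 character input:

s := "64p36" // len(s) == 5
roundUp := (len(s) + 7) &^ 7 // (5+7) &^ 7 == 8: round up to a multiple of 8
equals := roundUp - len(s)   // 3 padding characters needed
s = strings.ToUpper(s) + "========"[:equals] // "64P36==="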
// NewNameEncoding creates a NameEncoding from a string
func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
s = strings.ToLower(s)
switch s {
case "base32":
enc = caseInsensitiveBase32Encoding{}
case "base64":
enc = base64.RawURLEncoding
case "base32768":
enc = base32768.SafeEncoding
default:
err = fmt.Errorf("Unknown file name encoding mode %q", s)
}
return enc, err
}
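
A usage sketch, with the expected output taken from the base64 test vectors below:

enc, err := NewNameEncoding("base64")
// err == nil for the three supported encodings
fmt.Println(enc.EncodeToString([]byte("1"))) // "MQ"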
// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
dataKey [32]byte // Key for secretbox
@@ -121,15 +174,17 @@ type Cipher struct {
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
}
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
c := &Cipher{
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
}
@@ -187,30 +242,6 @@ func (c *Cipher) putBlock(buf []byte) {
c.buffers.Put(buf)
}
// encodeFileName encodes a filename using a modified version of
// standard base32 as described in RFC4648
//
// The standard encoding is modified in two ways
// * it becomes lower case (no-one likes upper case filenames!)
// * we strip the padding character `=`
func encodeFileName(in []byte) string {
encoded := base32.HexEncoding.EncodeToString(in)
encoded = strings.TrimRight(encoded, "=")
return strings.ToLower(encoded)
}
// decodeFileName decodes a filename as encoded by encodeFileName
func decodeFileName(in string) ([]byte, error) {
if strings.HasSuffix(in, "=") {
return nil, ErrorBadBase32Encoding
}
// First figure out how many padding characters to add
roundUpToMultipleOf8 := (len(in) + 7) &^ 7
equals := roundUpToMultipleOf8 - len(in)
in = strings.ToUpper(in) + "========"[:equals]
return base32.HexEncoding.DecodeString(in)
}
// encryptSegment encrypts a path segment
//
// This uses EME with AES
@@ -231,7 +262,7 @@ func (c *Cipher) encryptSegment(plaintext string) string {
}
paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext))
ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt)
return encodeFileName(ciphertext)
return c.fileNameEnc.EncodeToString(ciphertext)
}
// decryptSegment decrypts a path segment
@@ -239,7 +270,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
rawCiphertext, err := decodeFileName(ciphertext)
rawCiphertext, err := c.fileNameEnc.DecodeString(ciphertext)
if err != nil {
return "", err
}
@@ -580,7 +611,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
func (n *nonce) fromReader(in io.Reader) error {
read, err := io.ReadFull(in, (*n)[:])
if read != fileNonceSize {
return errors.Wrap(err, "short read of nonce")
return fmt.Errorf("short read of nonce: %w", err)
}
return nil
}
@@ -956,7 +987,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Re-open the underlying object with the offset given
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
if err != nil {
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
return 0, fh.finish(fmt.Errorf("couldn't reopen file with offset and limit: %w", err))
}
// Set the file handle


@@ -4,13 +4,15 @@ import (
"bytes"
"context"
"encoding/base32"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
"github.com/pkg/errors"
"github.com/Max-Sum/base32768"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
@@ -45,11 +47,31 @@ func TestNewNameEncryptionModeString(t *testing.T) {
assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
}
func TestEncodeFileName(t *testing.T) {
for _, test := range []struct {
in string
expected string
}{
type EncodingTestCase struct {
in string
expected string
}
func testEncodeFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
for _, test := range testCases {
enc, err := NewNameEncoding(encoding)
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
actual := enc.EncodeToString([]byte(test.in))
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
recovered, err := enc.DecodeString(test.expected)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
if caseInsensitive {
in := strings.ToUpper(test.expected)
recovered, err = enc.DecodeString(in)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
}
}
}
func TestEncodeFileNameBase32(t *testing.T) {
testEncodeFileName(t, "base32", []EncodingTestCase{
{"", ""},
{"1", "64"},
{"12", "64p0"},
@@ -67,20 +89,56 @@ func TestEncodeFileName(t *testing.T) {
{"12345678901234", "64p36d1l6orjge9g64p36d0"},
{"123456789012345", "64p36d1l6orjge9g64p36d1l"},
{"1234567890123456", "64p36d1l6orjge9g64p36d1l6o"},
} {
actual := encodeFileName([]byte(test.in))
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
recovered, err := decodeFileName(test.expected)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
in := strings.ToUpper(test.expected)
recovered, err = decodeFileName(in)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
}
}, true)
}
func TestDecodeFileName(t *testing.T) {
func TestEncodeFileNameBase64(t *testing.T) {
testEncodeFileName(t, "base64", []EncodingTestCase{
{"", ""},
{"1", "MQ"},
{"12", "MTI"},
{"123", "MTIz"},
{"1234", "MTIzNA"},
{"12345", "MTIzNDU"},
{"123456", "MTIzNDU2"},
{"1234567", "MTIzNDU2Nw"},
{"12345678", "MTIzNDU2Nzg"},
{"123456789", "MTIzNDU2Nzg5"},
{"1234567890", "MTIzNDU2Nzg5MA"},
{"12345678901", "MTIzNDU2Nzg5MDE"},
{"123456789012", "MTIzNDU2Nzg5MDEy"},
{"1234567890123", "MTIzNDU2Nzg5MDEyMw"},
{"12345678901234", "MTIzNDU2Nzg5MDEyMzQ"},
{"123456789012345", "MTIzNDU2Nzg5MDEyMzQ1"},
{"1234567890123456", "MTIzNDU2Nzg5MDEyMzQ1Ng"},
}, false)
}
func TestEncodeFileNameBase32768(t *testing.T) {
testEncodeFileName(t, "base32768", []EncodingTestCase{
{"", ""},
{"1", "㼿"},
{"12", "㻙ɟ"},
{"123", "㻙ⲿ"},
{"1234", "㻙ⲍƟ"},
{"12345", "㻙ⲍ⍟"},
{"123456", "㻙ⲍ⍆ʏ"},
{"1234567", "㻙ⲍ⍆觟"},
{"12345678", "㻙ⲍ⍆觓ɧ"},
{"123456789", "㻙ⲍ⍆觓栯"},
{"1234567890", "㻙ⲍ⍆觓栩ɣ"},
{"12345678901", "㻙ⲍ⍆觓栩朧"},
{"123456789012", "㻙ⲍ⍆觓栩朤ʅ"},
{"1234567890123", "㻙ⲍ⍆觓栩朤談"},
{"12345678901234", "㻙ⲍ⍆觓栩朤諆ɔ"},
{"123456789012345", "㻙ⲍ⍆觓栩朤諆媕"},
{"1234567890123456", "㻙ⲍ⍆觓栩朤諆媕䆿"},
}, false)
}
func TestDecodeFileNameBase32(t *testing.T) {
enc, err := NewNameEncoding("base32")
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
// We've tested decoding the valid ones above, now concentrate on the invalid ones
for _, test := range []struct {
in string
@@ -90,17 +148,65 @@ func TestDecodeFileName(t *testing.T) {
{"!", base32.CorruptInputError(0)},
{"hello=hello", base32.CorruptInputError(5)},
} {
actual, actualErr := decodeFileName(test.in)
actual, actualErr := enc.DecodeString(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func TestEncryptSegment(t *testing.T) {
c, _ := newCipher(NameEncryptionStandard, "", "", true)
func TestDecodeFileNameBase64(t *testing.T) {
enc, err := NewNameEncoding("base64")
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
// We've tested decoding the valid ones above, now concentrate on the invalid ones
for _, test := range []struct {
in string
expectedErr error
}{
{"64=", base64.CorruptInputError(2)},
{"!", base64.CorruptInputError(0)},
{"Hello=Hello", base64.CorruptInputError(5)},
} {
actual, actualErr := enc.DecodeString(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func TestDecodeFileNameBase32768(t *testing.T) {
enc, err := NewNameEncoding("base32768")
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
// We've tested decoding the valid ones above, now concentrate on the invalid ones
for _, test := range []struct {
in string
expectedErr error
}{
{"㼿c", base32768.CorruptInputError(1)},
{"!", base32768.CorruptInputError(0)},
{"㻙ⲿ=㻙ⲿ", base32768.CorruptInputError(2)},
} {
actual, actualErr := enc.DecodeString(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func testEncryptSegment(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range testCases {
actual := c.encryptSegment(test.in)
assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
recovered, err := c.decryptSegment(test.expected)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
if caseInsensitive {
in := strings.ToUpper(test.expected)
recovered, err = c.decryptSegment(in)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
}
}
}
func TestEncryptSegmentBase32(t *testing.T) {
testEncryptSegment(t, "base32", []EncodingTestCase{
{"", ""},
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"12", "l42g6771hnv3an9cgc8cr2n1ng"},
@@ -118,26 +224,61 @@ func TestEncryptSegment(t *testing.T) {
{"12345678901234", "moq0uqdlqrblrc5pa5u5c7hq9g"},
{"123456789012345", "eeam3li4rnommi3a762h5n7meg"},
{"1234567890123456", "mijbj0frqf6ms7frcr6bd9h0env53jv96pjaaoirk7forcgpt70g"},
} {
actual := c.encryptSegment(test.in)
assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
recovered, err := c.decryptSegment(test.expected)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
in := strings.ToUpper(test.expected)
recovered, err = c.decryptSegment(in)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
}
}, true)
}
func TestDecryptSegment(t *testing.T) {
func TestEncryptSegmentBase64(t *testing.T) {
testEncryptSegment(t, "base64", []EncodingTestCase{
{"", ""},
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"12", "qQUDHOGN_jVdLIMQzYrhvA"},
{"123", "1CxFf2Mti1xIPYlGruDh-A"},
{"1234", "RL-xOTmsxsG7kuTy2XJUxw"},
{"12345", "3FP_GHoeBJdq0yLgaED8IQ"},
{"123456", "Xc4T1Gqrs3OVYnrE6dpEWQ"},
{"1234567", "uZeEzssOnDWHEOzLqjwpog"},
{"12345678", "8noiTP5WkkbEuijsPhOpxQ"},
{"123456789", "GeNxgLA0wiaGAKU3U7qL4Q"},
{"1234567890", "x1DUhdmqoVWYVBLD3dha-A"},
{"12345678901", "iEyP_3BZR6vvv_2WM6NbZw"},
{"123456789012", "4OPGvS4SZdjvS568APUaFw"},
{"1234567890123", "Y8c5Wr8OhYYUo7fPwdojdg"},
{"12345678901234", "tjQPabXW112wuVF8Vh46TA"},
{"123456789012345", "c5Vh1kTd8WtIajmFEtz2dA"},
{"1234567890123456", "tKa5gfvTzW4d-2bMtqYgdf5Rz-k2ZqViW6HfjbIZ6cE"},
}, false)
}
func TestEncryptSegmentBase32768(t *testing.T) {
testEncryptSegment(t, "base32768", []EncodingTestCase{
{"", ""},
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"12", "竢朧䉱虃光塬䟛⣡蓟"},
{"123", "遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
{"1234", "䢟銮䵵狌㐜燳谒颴詟"},
{"12345", "钉Ꞇ㖃蚩憶狫朰杜㜿"},
{"123456", "啇ᚵⵕ憗䋫➫➓肤卟"},
{"1234567", "茫螓翁連劘樓㶔抉矟"},
{"12345678", "龝☳䘊辄岅較络㧩襟"},
{"123456789", "ⲱ苀㱆犂媐Ꮤ锇惫靟"},
{"1234567890", "計宁憕偵匢皫╛纺ꌟ"},
{"12345678901", "檆䨿鑫㪺藝ꡖ勇䦛婟"},
{"123456789012", "雑頏䰂䲝淚哚鹡魺⪟"},
{"1234567890123", "塃璶繁躸圅㔟䗃肃懟"},
{"12345678901234", "腺ᕚ崚鏕鏥讥鼌䑺䲿"},
{"123456789012345", "怪绕滻蕶肣但⠥荖惟"},
{"1234567890123456", "肳哀旚挶靏鏻㾭䱠慟㪳ꏆ賊兲铧敻塹魀ʟ"},
}, false)
}
func TestDecryptSegmentBase32(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 3328)
for i := range longName {
longName[i] = 'a'
}
c, _ := newCipher(NameEncryptionStandard, "", "", true)
enc, _ := NewNameEncoding("base32")
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range []struct {
in string
expectedErr error
@@ -145,118 +286,371 @@ func TestDecryptSegment(t *testing.T) {
{"64=", ErrorBadBase32Encoding},
{"!", base32.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
} {
actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func TestEncryptFileName(t *testing.T) {
func TestDecryptSegmentBase64(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 2816)
for i := range longName {
longName[i] = 'a'
}
enc, _ := NewNameEncoding("base64")
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range []struct {
in string
expectedErr error
}{
{"6H=", base64.CorruptInputError(2)},
{"!", base64.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
} {
actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func TestDecryptSegmentBase32768(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := strings.Repeat("怪", 1280)
enc, _ := NewNameEncoding("base32768")
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range []struct {
in string
expectedErr error
}{
{"怪=", base32768.CorruptInputError(1)},
{"!", base32768.CorruptInputError(0)},
{longName, ErrorTooLongAfterDecode},
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
} {
actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
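The three longName fixtures are sized so that the decoded output overflows the cipher's decoded-name cap (presumably 2048 bytes) in every encoding: 3328 base32 characters decode to 2080 bytes, 2816 base64 characters to 2112 bytes, and 1280 base32768 codepoints to 2400 bytes, so each case reliably produces ErrorTooLongAfterDecode.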
func testStandardEncryptFileName(t *testing.T, encoding string, testCasesEncryptDir []EncodingTestCase, testCasesNoEncryptDir []EncodingTestCase) {
// First standard mode
c, _ := newCipher(NameEncryptionStandard, "", "", true)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range testCasesEncryptDir {
assert.Equal(t, test.expected, c.EncryptFileName(test.in))
}
// Standard mode with directory name encryption off
c, _ = newCipher(NameEncryptionStandard, "", "", false)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
// Now off mode
c, _ = newCipher(NameEncryptionOff, "", "", true)
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
for _, test := range testCasesNoEncryptDir {
assert.Equal(t, test.expected, c.EncryptFileName(test.in))
}
}
func TestStandardEncryptFileNameBase32(t *testing.T) {
testStandardEncryptFileName(t, "base32", []EncodingTestCase{
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
}, []EncodingTestCase{
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"1/12", "1/l42g6771hnv3an9cgc8cr2n1ng"},
{"1/12/123", "1/12/qgm4avr35m5loi1th53ato71v0"},
{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
})
}
func TestStandardEncryptFileNameBase64(t *testing.T) {
testStandardEncryptFileName(t, "base64", []EncodingTestCase{
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
}, []EncodingTestCase{
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"1/12", "1/qQUDHOGN_jVdLIMQzYrhvA"},
{"1/12/123", "1/12/1CxFf2Mti1xIPYlGruDh-A"},
{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "1/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
})
}
func TestStandardEncryptFileNameBase32768(t *testing.T) {
testStandardEncryptFileName(t, "base32768", []EncodingTestCase{
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
}, []EncodingTestCase{
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"1/12", "1/竢朧䉱虃光塬䟛⣡蓟"},
{"1/12/123", "1/12/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "1/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
})
}
func TestNonStandardEncryptFileName(t *testing.T) {
// Off mode
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
// Obfuscation mode
c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
// Obfuscation mode with directory name encryption off
c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
c, _ = newCipher(NameEncryptionObfuscated, "", "", false, nil)
assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
}
func TestDecryptFileName(t *testing.T) {
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
in string
expected string
expectedErr error
}{
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
func testStandardDecryptFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
enc, _ := NewNameEncoding(encoding)
for _, test := range testCases {
// Test when dirNameEncrypt=true
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
assert.Equal(t, test.expectedErr, actualErr, what)
assert.NoError(t, actualErr)
assert.Equal(t, test.expected, actual)
if caseInsensitive {
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
actual, actualErr := c.DecryptFileName(strings.ToUpper(test.in))
assert.NoError(t, actualErr)
assert.Equal(t, test.expected, actual)
}
// Add a character should raise ErrorNotAMultipleOfBlocksize
actual, actualErr = c.DecryptFileName(enc.EncodeToString([]byte("1")) + test.in)
assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
assert.Equal(t, "", actual)
// Test when dirNameEncrypt=false
noDirEncryptIn := test.in
if strings.LastIndex(test.expected, "/") != -1 {
noDirEncryptIn = test.expected[:strings.LastIndex(test.expected, "/")] + test.in[strings.LastIndex(test.in, "/"):]
}
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
actual, actualErr = c.DecryptFileName(noDirEncryptIn)
assert.NoError(t, actualErr)
assert.Equal(t, test.expected, actual)
}
}
func TestStandardDecryptFileNameBase32(t *testing.T) {
testStandardDecryptFileName(t, "base32", []EncodingTestCase{
{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
}, true)
}
func TestStandardDecryptFileNameBase64(t *testing.T) {
testStandardDecryptFileName(t, "base64", []EncodingTestCase{
{"yBxRX25ypgUVyj8MSxJnFw", "1"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
}, false)
}
func TestStandardDecryptFileNameBase32768(t *testing.T) {
testStandardDecryptFileName(t, "base32768", []EncodingTestCase{
{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
}, false)
}
func TestNonStandardDecryptFileName(t *testing.T) {
for _, encoding := range []string{"base32", "base64", "base32768"} {
enc, _ := NewNameEncoding(encoding)
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
in string
expected string
expectedErr error
}{
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
assert.Equal(t, test.expectedErr, actualErr, what)
}
}
}
func TestEncDecMatches(t *testing.T) {
for _, test := range []struct {
mode NameEncryptionMode
in string
}{
{NameEncryptionStandard, "1/2/3/4"},
{NameEncryptionOff, "1/2/3/4"},
{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
{NameEncryptionObfuscated, "Avatar The Last Airbender"},
} {
c, _ := newCipher(test.mode, "", "", true)
out, err := c.DecryptFileName(c.EncryptFileName(test.in))
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, out, test.in, what)
assert.Equal(t, err, nil, what)
for _, encoding := range []string{"base32", "base64", "base32768"} {
enc, _ := NewNameEncoding(encoding)
for _, test := range []struct {
mode NameEncryptionMode
in string
}{
{NameEncryptionStandard, "1/2/3/4"},
{NameEncryptionOff, "1/2/3/4"},
{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
{NameEncryptionObfuscated, "Avatar The Last Airbender"},
} {
c, _ := newCipher(test.mode, "", "", true, enc)
out, err := c.DecryptFileName(c.EncryptFileName(test.in))
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, out, test.in, what)
assert.Equal(t, err, nil, what)
}
}
}
func TestEncryptDirName(t *testing.T) {
func testStandardEncryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase) {
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
// First standard mode
c, _ := newCipher(NameEncryptionStandard, "", "", true)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptDirName("1"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptDirName("1/12"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptDirName("1/12/123"))
// Standard mode with dir name encryption off
c, _ = newCipher(NameEncryptionStandard, "", "", false)
assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
// Now off mode
c, _ = newCipher(NameEncryptionOff, "", "", true)
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
for _, test := range testCases {
assert.Equal(t, test.expected, c.EncryptDirName(test.in))
}
}
func TestDecryptDirName(t *testing.T) {
func TestStandardEncryptDirNameBase32(t *testing.T) {
testStandardEncryptDirName(t, "base32", []EncodingTestCase{
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
})
}
func TestStandardEncryptDirNameBase64(t *testing.T) {
testStandardEncryptDirName(t, "base64", []EncodingTestCase{
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
})
}
func TestStandardEncryptDirNameBase32768(t *testing.T) {
testStandardEncryptDirName(t, "base32768", []EncodingTestCase{
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
})
}
func TestNonStandardEncryptDirName(t *testing.T) {
for _, encoding := range []string{"base32", "base64", "base32768"} {
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", false, enc)
assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
// Now off mode
c, _ = newCipher(NameEncryptionOff, "", "", true, enc)
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
}
}
func testStandardDecryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
enc, _ := NewNameEncoding(encoding)
for _, test := range testCases {
// Test dirNameEncrypt=true
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
actual, actualErr := c.DecryptDirName(test.in)
assert.Equal(t, test.expected, actual)
assert.NoError(t, actualErr)
if caseInsensitive {
actual, actualErr := c.DecryptDirName(strings.ToUpper(test.in))
assert.Equal(t, actual, test.expected)
assert.NoError(t, actualErr)
}
actual, actualErr = c.DecryptDirName(enc.EncodeToString([]byte("1")) + test.in)
assert.Equal(t, "", actual)
assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
// Test dirNameEncrypt=false
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
actual, actualErr = c.DecryptDirName(test.in)
assert.Equal(t, test.in, actual)
assert.NoError(t, actualErr)
actual, actualErr = c.DecryptDirName(test.expected)
assert.Equal(t, test.expected, actual)
assert.NoError(t, actualErr)
}
}
/*
enc, _ := NewNameEncoding(encoding)
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
in string
expected string
expectedErr error
}{
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
actual, actualErr := c.DecryptDirName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
assert.Equal(t, test.expectedErr, actualErr, what)
}
*/
func TestStandardDecryptDirNameBase32(t *testing.T) {
testStandardDecryptDirName(t, "base32", []EncodingTestCase{
{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
}, true)
}
func TestStandardDecryptDirNameBase64(t *testing.T) {
testStandardDecryptDirName(t, "base64", []EncodingTestCase{
{"yBxRX25ypgUVyj8MSxJnFw", "1"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
}, false)
}
func TestStandardDecryptDirNameBase32768(t *testing.T) {
testStandardDecryptDirName(t, "base32768", []EncodingTestCase{
{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
}, false)
}
func TestNonStandardDecryptDirName(t *testing.T) {
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
@@ -264,18 +658,11 @@ func TestDecryptDirName(t *testing.T) {
expected string
expectedErr error
}{
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123.bin", nil},
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil},
{NameEncryptionOff, true, ".bin", ".bin", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, nil)
actual, actualErr := c.DecryptDirName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
@@ -284,7 +671,7 @@ func TestDecryptDirName(t *testing.T) {
}
func TestEncryptedSize(t *testing.T) {
c, _ := newCipher(NameEncryptionStandard, "", "", true)
c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
for _, test := range []struct {
in int64
expected int64
@@ -308,7 +695,7 @@ func TestEncryptedSize(t *testing.T) {
func TestDecryptedSize(t *testing.T) {
// Test the errors since we tested the reverse above
c, _ := newCipher(NameEncryptionStandard, "", "", true)
c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
for _, test := range []struct {
in int64
expectedErr error
@@ -637,7 +1024,7 @@ func (r *randomSource) Read(p []byte) (n int, err error) {
func (r *randomSource) Write(p []byte) (n int, err error) {
for i := range p {
if p[i] != r.next() {
return 0, errors.Errorf("Error in stream at %d", r.counter)
return 0, fmt.Errorf("Error in stream at %d", r.counter)
}
}
return len(p), nil
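The error-handling churn through the rest of this commit follows one mechanical pattern: errors.Wrap(err, "context") from github.com/pkg/errors becomes fmt.Errorf("context: %w", err), errors.Errorf becomes fmt.Errorf, and errors.Cause type assertions become errors.As. A minimal sketch of why %w preserves matching (illustrative only, assuming Go 1.13+ wrapping semantics):

    package main

    import (
        "errors"
        "fmt"
    )

    var errBase = errors.New("base failure")

    func do() error {
        // %w wraps errBase so callers can still match it after annotation
        return fmt.Errorf("do failed: %w", errBase)
    }

    func main() {
        err := do()
        fmt.Println(errors.Is(err, errBase)) // true: the chain is unwrapped
    }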
@@ -679,7 +1066,7 @@ func (z *zeroes) Read(p []byte) (n int, err error) {
// Test encrypt decrypt with different buffer sizes
func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = &zeroes{} // zero out the nonce
buf := make([]byte, bufSize)
@@ -749,7 +1136,7 @@ func TestEncryptData(t *testing.T) {
{[]byte{1}, file1},
{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, file16},
} {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
@@ -772,7 +1159,7 @@ func TestEncryptData(t *testing.T) {
}
func TestNewEncrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
@@ -788,13 +1175,12 @@ func TestNewEncrypter(t *testing.T) {
fh, err = c.newEncrypter(z, nil)
assert.Nil(t, fh)
assert.Error(t, err, "short read of nonce")
}
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
// cause a fatal loop
func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
@@ -823,7 +1209,7 @@ func (c *closeDetector) Close() error {
}
func TestNewDecrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
@@ -866,7 +1252,7 @@ func TestNewDecrypter(t *testing.T) {
// Test the stream returning 0, io.ErrUnexpectedEOF
func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
@@ -882,7 +1268,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
}
func TestNewDecrypterSeekLimit(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = &zeroes{} // nodge the crypto rand generator
@@ -1088,7 +1474,7 @@ func TestDecrypterCalculateUnderlying(t *testing.T) {
}
func TestDecrypterRead(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
// Test truncating the file at each possible point
@@ -1152,7 +1538,7 @@ func TestDecrypterRead(t *testing.T) {
}
func TestDecrypterClose(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
cd := newCloseDetector(bytes.NewBuffer(file16))
@@ -1190,7 +1576,7 @@ func TestDecrypterClose(t *testing.T) {
}
func TestPutGetBlock(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
block := c.getBlock()
@@ -1201,7 +1587,7 @@ func TestPutGetBlock(t *testing.T) {
}
func TestKey(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
// Check zero keys OK


@@ -3,13 +3,13 @@ package crypt
import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -116,6 +116,29 @@ names, or for debugging purposes.`,
Help: "Encrypt file data.",
},
},
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to a text string.
This option could help with shortening the encrypted filename. The
suitable option would depend on the way your remote counts the filename
length and whether it is case sensitive.`,
Default: "base32",
Examples: []fs.OptionExample{
{
Value: "base32",
Help: "Encode using base32. Suitable for all remote.",
},
{
Value: "base64",
Help: "Encode using base64. Suitable for case sensitive remote.",
},
{
Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
},
},
Advanced: true,
}},
})
}
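For illustration only (not part of the diff), a crypt remote opting into the new option might look like this in rclone.conf; the section name, wrapped remote and password are hypothetical:

[secret]
type = crypt
remote = onedrive:vault
password = ...
filename_encryption = standard
filename_encoding = base32768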
@@ -131,18 +154,22 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
}
password, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password")
return nil, fmt.Errorf("failed to decrypt password: %w", err)
}
var salt string
if opt.Password2 != "" {
salt, err = obscure.Reveal(opt.Password2)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password2")
return nil, fmt.Errorf("failed to decrypt password2: %w", err)
}
}
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
enc, err := NewNameEncoding(opt.FilenameEncoding)
if err != nil {
return nil, errors.Wrap(err, "failed to make cipher")
return nil, err
}
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption, enc)
if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err)
}
return cipher, nil
}
@@ -192,7 +219,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
}
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
}
f := &Fs{
Fs: wrappedFs,
@@ -229,6 +256,7 @@ type Options struct {
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
FilenameEncoding string `config:"filename_encoding"`
}
// Fs represents a wrapped fs.Fs
@@ -300,7 +328,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
case fs.Directory:
f.addDir(ctx, &newEntries, x)
default:
return nil, errors.Errorf("Unknown object type %T", entry)
return nil, fmt.Errorf("Unknown object type %T", entry)
}
}
return newEntries, nil
@@ -406,7 +434,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
var dstHash string
dstHash, err = o.Hash(ctx, ht)
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
return nil, fmt.Errorf("failed to read destination hash: %w", err)
}
if srcHash != "" && dstHash != "" {
if srcHash != dstHash {
@@ -415,7 +443,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
@@ -616,24 +644,24 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
// Open the src for input
in, err := src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
return "", fmt.Errorf("failed to open src: %w", err)
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
return "", fmt.Errorf("failed to make encrypter: %w", err)
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
return "", fmt.Errorf("failed to make hasher: %w", err)
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
return "", fmt.Errorf("failed to hash data: %w", err)
}
return m.Sums()[hashType], nil
@@ -652,12 +680,12 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
// use a limited read so we only read the header
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce")
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
}
d, err := f.cipher.newDecrypter(in)
if err != nil {
_ = in.Close()
return "", errors.Wrap(err, "failed to open object to read nonce")
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
}
nonce := d.nonce
// fs.Debugf(o, "Read nonce % 2x", nonce)
@@ -676,7 +704,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
// Close d (and hence in) once we have read the nonce
err = d.Close()
if err != nil {
return "", errors.Wrap(err, "failed to close nonce read")
return "", fmt.Errorf("failed to close nonce read: %w", err)
}
return f.computeHashWithNonce(ctx, nonce, src, hashType)
@@ -795,7 +823,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
for _, encryptedFileName := range arg {
fileName, err := f.DecryptFileName(encryptedFileName)
if err != nil {
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
return out, fmt.Errorf("failed to decrypt: %s: %w", encryptedFileName, err)
}
out = append(out, fileName)
}


@@ -29,7 +29,7 @@ func TestIntegration(t *testing.T) {
}
// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
func TestStandardBase32(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
@@ -49,6 +49,48 @@ func TestStandard(t *testing.T) {
})
}
func TestStandardBase64(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
func TestStandardBase32768(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
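A hedged usage note: assuming the file lives in the crypt backend's test package, the new encodings can be exercised selectively with standard Go tooling, for example

    go test -v -run 'TestStandardBase(64|32768)' ./backend/crypt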
// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
if *fstest.RemoteName != "" {


@@ -4,7 +4,7 @@
// buffers which are a multiple of an underlying crypto block size.
package pkcs7
import "github.com/pkg/errors"
import "errors"
// Errors Unpad can return
var (

backend/drive/drive.go Executable file → Normal file

@@ -11,12 +11,14 @@ import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http"
"path"
"regexp"
"sort"
"strconv"
"strings"
@@ -25,7 +27,6 @@ import (
"text/template"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
@@ -84,7 +85,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
RedirectURL: oauthutil.RedirectURL,
}
_mimeTypeToExtensionDuplicates = map[string]string{
"application/x-vnd.oasis.opendocument.presentation": ".odp",
@@ -188,7 +189,7 @@ func init() {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, errors.Wrap(err, "couldn't parse config into struct")
return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
}
switch config.State {
@@ -226,7 +227,7 @@ func init() {
case "teamdrive_config":
f, err := newFs(ctx, name, "", m)
if err != nil {
return nil, errors.Wrap(err, "failed to make Fs to list Shared Drives")
return nil, fmt.Errorf("failed to make Fs to list Shared Drives: %w", err)
}
teamDrives, err := f.listTeamDrives(ctx)
if err != nil {
@@ -299,6 +300,17 @@ a non root folder as its starting point.
Default: true,
Help: "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
Advanced: true,
}, {
Name: "copy_shortcut_content",
Default: false,
Help: `Server side copy contents of shortcuts instead of the shortcut.
When doing server side copies, normally rclone will copy shortcuts as
shortcuts.
If this flag is used then rclone will copy the contents of shortcuts
rather than shortcuts themselves when doing server side copies.`,
Advanced: true,
}, {
Name: "skip_gdocs",
Default: false,
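A usage sketch for the new option above (hedged: the flag name assumes rclone's usual mapping of backend options to --drive-* flags). Copying the resolved contents of shortcuts server side, rather than the shortcut stubs themselves, would look like

    rclone copy --drive-copy-shortcut-content drive:src drive:dst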
@@ -542,6 +554,14 @@ Google don't document so it may break in the future.
Normally rclone dereferences shortcut files making them appear as if
they are the original file (see [the shortcuts section](#shortcuts)).
If this flag is set then rclone will ignore shortcut files completely.
`,
Advanced: true,
Default: false,
}, {
Name: "skip_dangling_shortcuts",
Help: `If set skip dangling shortcut files.
If this is set then rclone will not show any dangling shortcuts in listings.
`,
Advanced: true,
Default: false,
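Similarly hedged for skip_dangling_shortcuts: with the usual option-to-flag mapping this surfaces as --drive-skip-dangling-shortcuts, and the listing change further down simply drops entries whose resolved MIME type is the dangling-shortcut marker, for example

    rclone lsf --drive-skip-dangling-shortcuts drive: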
@@ -578,6 +598,7 @@ type Options struct {
TeamDriveID string `config:"team_drive"`
AuthOwnerOnly bool `config:"auth_owner_only"`
UseTrash bool `config:"use_trash"`
CopyShortcutContent bool `config:"copy_shortcut_content"`
SkipGdocs bool `config:"skip_gdocs"`
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"`
@@ -604,6 +625,7 @@ type Options struct {
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
StopOnDownloadLimit bool `config:"stop_on_download_limit"`
SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -755,7 +777,7 @@ func (f *Fs) getFile(ctx context.Context, ID string, fields googleapi.Field) (in
func (f *Fs) getRootID(ctx context.Context) (string, error) {
info, err := f.getFile(ctx, "root", "id")
if err != nil {
return "", errors.Wrap(err, "couldn't find root directory ID")
return "", fmt.Errorf("couldn't find root directory ID: %w", err)
}
return info.Id, nil
}
@@ -882,7 +904,7 @@ OUTER:
return f.shouldRetry(ctx, err)
})
if err != nil {
return false, errors.Wrap(err, "couldn't list directory")
return false, fmt.Errorf("couldn't list directory: %w", err)
}
if files.IncompleteSearch {
fs.Errorf(f, "search result INCOMPLETE")
@@ -904,7 +926,12 @@ OUTER:
}
item, err = f.resolveShortcut(ctx, item)
if err != nil {
return false, errors.Wrap(err, "list")
return false, fmt.Errorf("list: %w", err)
}
// leave the dangling shortcut out of the listings
// we've already logged about the dangling shortcut in resolveShortcut
if f.opt.SkipDanglingShortcuts && item.MimeType == shortcutMimeTypeDangling {
continue
}
}
// Check the case of items is correct since
@@ -965,7 +992,7 @@ func fixMimeType(mimeTypeIn string) string {
mimeTypeOut = mime.FormatMediaType(mediaType, param)
}
if mimeTypeOut == "" {
panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
panic(fmt.Errorf("unable to fix MIME type %q", mimeTypeIn))
}
return mimeTypeOut
}
@@ -1000,7 +1027,7 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
}
mt := mime.TypeByExtension(extension)
if mt == "" {
return extensions, mimeTypes, errors.Errorf("couldn't find MIME type for extension %q", extension)
return extensions, mimeTypes, fmt.Errorf("couldn't find MIME type for extension %q", extension)
}
if !containsString(extensions, extension) {
extensions = append(extensions, extension)
@@ -1027,7 +1054,7 @@ func getServiceAccountClient(ctx context.Context, opt *Options, credentialsData
scopes := driveScopes(opt.Scope)
conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
return nil, fmt.Errorf("error processing credentials: %w", err)
}
if opt.Impersonate != "" {
conf.Subject = opt.Impersonate
@@ -1044,19 +1071,19 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
opt.ServiceAccountCredentials = string(loadedCreds)
}
if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(ctx, opt, []byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, errors.Wrap(err, "failed to create oauth client from service account")
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
if err != nil {
return nil, errors.Wrap(err, "failed to create oauth client")
return nil, fmt.Errorf("failed to create oauth client: %w", err)
}
}
@@ -1065,10 +1092,10 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if !isPowerOfTwo(int64(cs)) {
return errors.Errorf("%v isn't a power of two", cs)
return fmt.Errorf("%v isn't a power of two", cs)
}
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -1106,16 +1133,16 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "drive: upload cutoff")
return nil, fmt.Errorf("drive: upload cutoff: %w", err)
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "drive: chunk size")
return nil, fmt.Errorf("drive: chunk size: %w", err)
}
oAuthClient, err := createOAuthClient(ctx, opt, name, m)
if err != nil {
return nil, errors.Wrap(err, "drive: failed when making oauth client")
return nil, fmt.Errorf("drive: failed when making oauth client: %w", err)
}
root, err := parseDrivePath(path)
@@ -1149,13 +1176,13 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
f.client = oAuthClient
f.svc, err = drive.New(f.client)
if err != nil {
return nil, errors.Wrap(err, "couldn't create Drive client")
return nil, fmt.Errorf("couldn't create Drive client: %w", err)
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
if err != nil {
return nil, errors.Wrap(err, "couldn't create Drive v2 client")
return nil, fmt.Errorf("couldn't create Drive v2 client: %w", err)
}
}
@@ -1180,7 +1207,8 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
// otherwise look up the actual root ID
rootID, err := f.getRootID(ctx)
if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
var gerr *googleapi.Error
if errors.As(err, &gerr) && gerr.Code == 404 {
// 404 means that this scope does not have permission to get the
// root so just use "root"
rootID = "root"
@@ -1322,7 +1350,7 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
t := linkTemplate(exportMimeType)
if t == nil {
return nil, errors.Errorf("unsupported link type %s", exportMimeType)
return nil, fmt.Errorf("unsupported link type %s", exportMimeType)
}
xdgIcon := _mimeTypeToXDGLinkIcons[info.MimeType]
if xdgIcon == "" {
@@ -1335,7 +1363,7 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
info.WebViewLink, info.Name, xdgIcon,
})
if err != nil {
return nil, errors.Wrap(err, "executing template failed")
return nil, fmt.Errorf("executing template failed: %w", err)
}
baseObject := f.newBaseObject(remote+extension, info)
@@ -1372,7 +1400,7 @@ func (f *Fs) newObjectWithExportInfo(
// will have been resolved so this will do nothing.
info, err = f.resolveShortcut(ctx, info)
if err != nil {
return nil, errors.Wrap(err, "new object")
return nil, fmt.Errorf("new object: %w", err)
}
switch {
case info.MimeType == driveFolderType:
@@ -1570,6 +1598,15 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
}
}
// If using a link type export and a more specific export
// hasn't been found all docs should be exported
for _, _extension := range f.exportExtensions {
_mimeType := mime.TypeByExtension(_extension)
if isLinkMimeType(_mimeType) {
return _extension, _mimeType, true
}
}
// else return empty
return "", "", isDocument
}
@@ -1580,6 +1617,14 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", "", false)
func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
// If item has MD5 sum it is a file stored on drive
if item.Md5Checksum != "" {
return
}
// Folders can't be documents
if item.MimeType == driveFolderType {
return
}
extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
if extension != "" {
filename = item.Name + extension
@@ -2015,13 +2060,14 @@ func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *dr
}
newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
var gerr *googleapi.Error
if errors.As(err, &gerr) && gerr.Code == 404 {
// 404 means dangling shortcut, so just return the shortcut with the mime type mangled
fs.Logf(nil, "Dangling shortcut %q detected", item.Name)
item.MimeType = shortcutMimeTypeDangling
return item, nil
}
return nil, errors.Wrap(err, "failed to resolve shortcut")
return nil, fmt.Errorf("failed to resolve shortcut: %w", err)
}
// make sure we use the Name, Parents and Trashed from the original item
newItem.Name = item.Name
@@ -2123,10 +2169,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
if exportExt == "" {
return nil, errors.Errorf("No export format found for %q", importMimeType)
return nil, fmt.Errorf("No export format found for %q", importMimeType)
}
if exportExt != srcExt && !f.opt.AllowImportNameChange {
return nil, errors.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
return nil, fmt.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
}
}
}
@@ -2194,7 +2240,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return false
})
if err != nil {
return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
return fmt.Errorf("MergeDirs list failed on %v: %w", srcDir, err)
}
// move them into place
for _, info := range infos {
@@ -2210,14 +2256,14 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return f.shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
return fmt.Errorf("MergeDirs move failed on %q in %v: %w", info.Name, srcDir, err)
}
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.delete(ctx, srcDir.ID(), true)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
return fmt.Errorf("MergeDirs move failed to rmdir %q: %w", srcDir, err)
}
}
return nil
@@ -2280,7 +2326,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return err
}
if found {
return errors.Errorf("directory not empty")
return fmt.Errorf("directory not empty")
}
}
if root != "" {
@@ -2372,9 +2418,16 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
createInfo.Description = ""
}
// get the ID of the thing to copy - this is the shortcut if available
// get the ID of the thing to copy
// copy the contents if CopyShortcutContent
// else copy the shortcut only
id := shortcutID(srcObj.id)
if f.opt.CopyShortcutContent {
id = actualID(srcObj.id)
}
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Copy(id, createInfo).
@@ -2458,7 +2511,7 @@ func (f *Fs) cleanupTeamDrive(ctx context.Context, dir string, directoryID strin
return false
})
if err != nil {
err = errors.Wrap(err, "failed to list directory")
err = fmt.Errorf("failed to list directory: %w", err)
r.Errors++
fs.Errorf(dir, "%v", err)
}
@@ -2502,7 +2555,7 @@ func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
return f.shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "failed to get Shared Drive info")
return fmt.Errorf("failed to get Shared Drive info: %w", err)
}
fs.Debugf(f, "read info from Shared Drive %q", td.Name)
return err
@@ -2525,7 +2578,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get Drive storageQuota")
return nil, fmt.Errorf("failed to get Drive storageQuota: %w", err)
}
q := about.StorageQuota
usage := &fs.Usage{
@@ -2849,7 +2902,7 @@ func (f *Fs) Hashes() hash.Set {
func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64)
if err != nil {
return errors.Wrap(err, "couldn't convert chunk size to int")
return fmt.Errorf("couldn't convert chunk size to int: %w", err)
}
chunkSize := fs.SizeSuffix(chunkSizeInt)
if chunkSize == f.opt.ChunkSize {
@@ -2886,17 +2939,17 @@ func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err err
f.opt.ServiceAccountCredentials = ""
oAuthClient, err := createOAuthClient(ctx, &f.opt, f.name, f.m)
if err != nil {
return errors.Wrap(err, "drive: failed when making oauth client")
return fmt.Errorf("drive: failed when making oauth client: %w", err)
}
f.client = oAuthClient
f.svc, err = drive.New(f.client)
if err != nil {
return errors.Wrap(err, "couldn't create Drive client")
return fmt.Errorf("couldn't create Drive client: %w", err)
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
if err != nil {
return errors.Wrap(err, "couldn't create Drive v2 client")
return fmt.Errorf("couldn't create Drive v2 client: %w", err)
}
}
return nil
@@ -2925,12 +2978,12 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
isDir = true
} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
if err != fs.ErrorIsDir {
return nil, errors.Wrap(err, "can't find source")
return nil, fmt.Errorf("can't find source: %w", err)
}
// source was a directory
srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false)
if err != nil {
return nil, errors.Wrap(err, "failed to find source dir")
return nil, fmt.Errorf("failed to find source dir: %w", err)
}
isDir = true
} else {
@@ -2947,13 +3000,13 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
} else if err == fs.ErrorIsDir {
err = errors.New("existing directory")
}
return nil, errors.Wrap(err, "not overwriting shortcut target")
return nil, fmt.Errorf("not overwriting shortcut target: %w", err)
}
// Create destination shortcut
createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
if err != nil {
return nil, errors.Wrap(err, "shortcut destination failed")
return nil, fmt.Errorf("shortcut destination failed: %w", err)
}
createInfo.MimeType = shortcutMimeType
createInfo.ShortcutDetails = &drive.FileShortcutDetails{
@@ -2970,7 +3023,7 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
return dstFs.shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "shortcut creation failed")
return nil, fmt.Errorf("shortcut creation failed: %w", err)
}
if isDir {
return nil, nil
@@ -2990,7 +3043,7 @@ func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.Drive, err err
return defaultFs.shouldRetry(ctx, err)
})
if err != nil {
return drives, errors.Wrap(err, "listing Team Drives failed")
return drives, fmt.Errorf("listing Team Drives failed: %w", err)
}
drives = append(drives, teamDrives.Drives...)
if teamDrives.NextPageToken == "" {
@@ -3033,7 +3086,7 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
return f.shouldRetry(ctx, err)
})
if err != nil {
err = errors.Wrap(err, "failed to restore")
err = fmt.Errorf("failed to restore: %w", err)
r.Errors++
fs.Errorf(remote, "%v", err)
} else {
@@ -3050,7 +3103,7 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
return false
})
if err != nil {
err = errors.Wrap(err, "failed to list directory")
err = fmt.Errorf("failed to list directory: %w", err)
r.Errors++
fs.Errorf(dir, "%v", err)
}
@@ -3074,10 +3127,10 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
info, err := f.getFile(ctx, id, f.fileFields)
if err != nil {
return errors.Wrap(err, "couldn't find id")
return fmt.Errorf("couldn't find id: %w", err)
}
if info.MimeType == driveFolderType {
return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
}
info.Name = f.opt.Enc.ToStandardName(info.Name)
o, err := f.newObjectWithInfo(ctx, info.Name, info)
@@ -3100,7 +3153,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
}
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
if err != nil {
return errors.Wrap(err, "copy failed")
return fmt.Errorf("copy failed: %w", err)
}
return nil
}
@@ -3183,7 +3236,7 @@ This will return a JSON list of objects like this
With the -o config parameter it will output the list in a format
suitable for adding to a config file to make aliases for all the
drives found.
drives found and a combined drive.
[My Drive]
type = alias
@@ -3193,10 +3246,15 @@ drives found.
type = alias
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. This may require manual editing
of the names.
[AllDrives]
type = combine
remote = "My Drive=My Drive:" "Test Drive=Test Drive:"
Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal characters will be
substituted with "_" and duplicate names will have numbers suffixed.
It will also add a remote called AllDrives which shows all the shared
drives combined into one directory tree.
`,
}, {
Name: "untrash",
@@ -3299,7 +3357,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if ok {
targetFs, err := cache.Get(ctx, target)
if err != nil {
return nil, errors.Wrap(err, "couldn't find target")
return nil, fmt.Errorf("couldn't find target: %w", err)
}
dstFs, ok = targetFs.(*Fs)
if !ok {
@@ -3312,14 +3370,30 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if err != nil {
return nil, err
}
re := regexp.MustCompile(`[^\w_. -]+`)
if _, ok := opt["config"]; ok {
lines := []string{}
for _, drive := range drives {
upstreams := []string{}
names := make(map[string]struct{}, len(drives))
for i, drive := range drives {
name := re.ReplaceAllString(drive.Name, "_")
for {
if _, found := names[name]; !found {
break
}
name += fmt.Sprintf("-%d", i)
}
names[name] = struct{}{}
lines = append(lines, "")
lines = append(lines, fmt.Sprintf("[%s]", drive.Name))
lines = append(lines, fmt.Sprintf("[%s]", name))
lines = append(lines, fmt.Sprintf("type = alias"))
lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id))
upstreams = append(upstreams, fmt.Sprintf(`"%s=%s:"`, name, name))
}
lines = append(lines, "")
lines = append(lines, fmt.Sprintf("[AllDrives]"))
lines = append(lines, fmt.Sprintf("type = combine"))
lines = append(lines, fmt.Sprintf("upstreams = %s", strings.Join(upstreams, " ")))
return lines, nil
}
return drives, nil
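The sanitisation regexp keeps ASCII word characters, underscores, dots, spaces and hyphens, and collapses any other run of characters to a single "_"; duplicate names then gain numeric suffixes. A quick illustration (drive name invented):

    re := regexp.MustCompile(`[^\w_. -]+`)
    fmt.Println(re.ReplaceAllString("Marketing/EMEA: Zürich", "_"))
    // prints: Marketing_EMEA_ Z_rich  (\w is ASCII-only in Go's regexp)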
@@ -3338,7 +3412,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
arg = arg[2:]
err = f.copyID(ctx, id, dest)
if err != nil {
return nil, errors.Wrapf(err, "failed copying %q to %q", id, dest)
return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
}
}
return nil, nil
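For reference, this is the path exercised by the copyid backend command, invoked along these lines (file ID and destination invented):

    rclone backend copyid drive: 1AbCdEfGhIjKlMnOp /tmp/restored.bin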
@@ -3572,11 +3646,11 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
url += "acknowledgeAbuse=true"
_, res, err = o.httpResponse(ctx, url, "GET", options)
} else {
err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
err = fmt.Errorf("Use the --drive-acknowledge-abuse flag to download this file: %w", err)
}
}
if err != nil {
return nil, errors.Wrap(err, "open file failed")
return nil, fmt.Errorf("open file failed: %w", err)
}
}
return res.Body, nil
@@ -3740,14 +3814,14 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
}
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
return errors.Errorf("can't update google document type without --drive-import-formats")
return fmt.Errorf("can't update google document type without --drive-import-formats")
}
importMimeType = o.fs.findImportFormat(ctx, updateInfo.MimeType)
if importMimeType == "" {
return errors.Errorf("no import format found for %q", srcMimeType)
return fmt.Errorf("no import format found for %q", srcMimeType)
}
if importMimeType != o.documentMimeType {
return errors.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
return fmt.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
}
updateInfo.MimeType = importMimeType


@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -15,7 +16,6 @@ import (
"testing"
"time"
"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
@@ -422,11 +422,7 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
require.NoError(t, err)
o := obj.(*Object)
dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(dir)
}()
dir := t.TempDir()
checkFile := func(name string) {
filePath := filepath.Join(dir, name)
@@ -491,19 +487,11 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
subFs, isDriveFs := subFsResult.(*Fs)
require.True(t, isDriveFs)
tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir1)
}()
tempDir1 := t.TempDir()
tempFs1, err := fs.NewFs(defCtx, tempDir1)
require.NoError(t, err)
tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir2)
}()
tempDir2 := t.TempDir()
tempFs2, err := fs.NewFs(defCtx, tempDir2)
require.NoError(t, err)
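The test hunks swap hand-rolled ioutil.TempDir/os.RemoveAll bookkeeping for t.TempDir(), which the testing package deletes automatically when the test and its subtests finish. A minimal sketch:

    package example_test

    import (
        "os"
        "path/filepath"
        "testing"
    )

    func TestScratchDir(t *testing.T) {
        dir := t.TempDir() // unique per test, removed automatically on cleanup
        p := filepath.Join(dir, "out.txt")
        if err := os.WriteFile(p, []byte("data"), 0o600); err != nil {
            t.Fatal(err)
        }
    }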


@@ -8,13 +8,13 @@ package dropbox
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
@@ -66,7 +66,7 @@ type batcherResponse struct {
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, errors.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
@@ -91,7 +91,7 @@ func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.
case "off":
size = 0
default:
return nil, errors.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
@@ -135,7 +135,7 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
return err != nil, err
})
if err != nil {
return nil, errors.Wrap(err, "batch commit failed")
return nil, fmt.Errorf("batch commit failed: %w", err)
}
return batchStatus, nil
}
@@ -180,7 +180,7 @@ func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *f
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, errors.Wrapf(err, "wait for batch failed after %d tries in %v", try, time.Since(startTime))
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}
// commit a batch
@@ -216,13 +216,13 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
case "complete":
complete = batchStatus.Complete
default:
return errors.Errorf("batch returned unknown status %q", batchStatus.Tag)
return fmt.Errorf("batch returned unknown status %q", batchStatus.Tag)
}
// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {
return errors.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}
// Report results to clients
@@ -250,7 +250,7 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
resp.err = errors.Errorf("batch upload failed: %s", errorTag)
resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
@@ -261,7 +261,7 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
// Report an error if any failed in the batch
if errorTag != "" {
return errors.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)

backend/dropbox/dropbox.go Executable file → Normal file

@@ -23,6 +23,7 @@ of path_display and all will be well.
import (
"context"
"errors"
"fmt"
"io"
"path"
@@ -38,7 +39,6 @@ import (
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/dropbox/dbhash"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -363,24 +363,24 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
if err == nil {
return false, err
}
baseErrString := errors.Cause(err).Error()
errString := err.Error()
// First check for specific errors
if strings.Contains(baseErrString, "insufficient_space") {
if strings.Contains(errString, "insufficient_space") {
return false, fserrors.FatalError(err)
} else if strings.Contains(baseErrString, "malformed_path") {
} else if strings.Contains(errString, "malformed_path") {
return false, fserrors.NoRetryError(err)
}
// Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
}
// Keep old behavior for backward compatibility
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
return true, err
}
return fserrors.ShouldRetry(err), err
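Dropping errors.Cause works here because a %w-wrapped error's Error() text embeds the underlying message, so the substring checks still match wrapped errors. A small sketch (messages invented):

    base := errors.New("insufficient_space")
    wrapped := fmt.Errorf("upload failed: %w", base)
    strings.Contains(wrapped.Error(), "insufficient_space") // true - no Cause() needed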
@@ -389,10 +389,10 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
@@ -415,7 +415,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "dropbox: chunk size")
return nil, fmt.Errorf("dropbox: chunk size: %w", err)
}
// Convert the old token if it exists. The old token was just
@@ -427,13 +427,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
if err != nil {
return nil, errors.Wrap(err, "NewFS convert token")
return nil, fmt.Errorf("NewFS convert token: %w", err)
}
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m))
if err != nil {
return nil, errors.Wrap(err, "failed to configure dropbox")
return nil, fmt.Errorf("failed to configure dropbox: %w", err)
}
ci := fs.GetConfig(ctx)
@@ -474,7 +474,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
memberIds, err := f.team.MembersGetInfo(args)
if err != nil {
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
}
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
@@ -551,7 +551,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "get current account failed")
return nil, fmt.Errorf("get current account failed: %w", err)
}
switch x := acc.RootInfo.(type) {
case *common.TeamRootInfo:
@@ -559,22 +559,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case *common.UserRootInfo:
f.ns = x.RootNamespaceId
default:
return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
return nil, fmt.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
}
fs.Debugf(f, "Using root namespace %q", f.ns)
}
f.setRoot(root)
// See if the root is actually an object
_, err = f.getFileMetadata(ctx, f.slashRoot)
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
if f.root != "" {
_, err = f.getFileMetadata(ctx, f.slashRoot)
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
@@ -710,7 +712,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -784,7 +786,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -850,6 +852,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(root),
Recursive: false,
Limit: 1000,
}
if root == "/" {
arg.Path = "" // Specify root folder as empty string
@@ -877,7 +880,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -989,7 +992,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
// check directory exists
_, err = f.getDirMetadata(ctx, root)
if err != nil {
return errors.Wrap(err, "Rmdir")
return fmt.Errorf("Rmdir: %w", err)
}
root = f.opt.Enc.FromStandardPath(root)
@@ -1007,7 +1010,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
return fmt.Errorf("Rmdir: %w", err)
}
if len(res.Entries) != 0 {
return errors.New("directory not empty")
@@ -1073,7 +1076,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "copy failed")
return nil, fmt.Errorf("copy failed: %w", err)
}
// Set the metadata
@@ -1083,7 +1086,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err = dstObj.setMetadataFromEntry(fileInfo)
if err != nil {
return nil, errors.Wrap(err, "copy failed")
return nil, fmt.Errorf("copy failed: %w", err)
}
return dstObj, nil
@@ -1134,7 +1137,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "move failed")
return nil, fmt.Errorf("move failed: %w", err)
}
// Set the metadata
@@ -1144,7 +1147,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err = dstObj.setMetadataFromEntry(fileInfo)
if err != nil {
return nil, errors.Wrap(err, "move failed")
return nil, fmt.Errorf("move failed: %w", err)
}
return dstObj, nil
}
@@ -1252,7 +1255,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "MoveDir failed")
return fmt.Errorf("MoveDir failed: %w", err)
}
return nil
@@ -1266,7 +1269,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
return nil, fmt.Errorf("about failed: %w", err)
}
var total uint64
if q.Allocation != nil {
@@ -1406,7 +1409,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
return shouldRetry(ctx, err)
})
if err != nil {
return "", errors.Wrap(err, "list continue")
return "", fmt.Errorf("list continue: %w", err)
}
cursor = changeList.Cursor
var entryType fs.EntryType
@@ -1485,7 +1488,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
}
err := o.readMetaData(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to read hash from metadata")
return "", fmt.Errorf("failed to read hash from metadata: %w", err)
}
return o.hash, nil
}
@@ -1647,13 +1650,37 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
}
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
skip := int64(0)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
if _, err = chunk.Seek(skip, io.SeekStart); err != nil {
return false, err
}
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
// after session is started, we retry everything
if err != nil {
// Check for incorrect offset error and retry with new offset
if uErr, ok := err.(files.UploadSessionAppendV2APIError); ok {
if uErr.EndpointError != nil && uErr.EndpointError.IncorrectOffset != nil {
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
delta := int64(correctOffset) - int64(cursor.Offset)
skip += delta
what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
if skip < 0 {
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
} else if skip == chunkSize {
fs.Debugf(o, "%s: chunk received OK - continuing", what)
return false, nil
} else if skip > chunkSize {
// This error should never happen
return false, fmt.Errorf("can't seek forwards by more than a chunk to correct offset: %s", what)
}
// Skip the sent data on next retry
cursor.Offset = uint64(int64(cursor.Offset) + delta)
fs.Debugf(o, "%s: skipping bytes on retry to fix offset", what)
}
}
}
return err != nil, err
})
if err != nil {
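Worked example of the offset correction, with invented numbers: the client sent a chunk at cursor.Offset = 1000, but the server replies CorrectOffset = 1500 for a 2048-byte chunk. Then:

    delta := int64(1500) - int64(1000) // 500 bytes already made it to the server
    skip += delta                      // 0 <= skip < chunkSize: reseek into the chunk, resend the tail
    // skip == chunkSize would mean the whole chunk landed: continue with the next one
    // skip < 0 or skip > chunkSize cannot be recovered: give up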
@@ -1738,7 +1765,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
return fserrors.NoRetryError(fmt.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
}
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
commitInfo.Mode.Tag = "overwrite"
@@ -1757,12 +1784,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
entry, err = o.fs.srv.Upload(commitInfo, in)
entry, err = o.fs.srv.Upload(&files.UploadArg{CommitInfo: *commitInfo}, in)
return shouldRetry(ctx, err)
})
}
if err != nil {
return errors.Wrap(err, "upload failed")
return fmt.Errorf("upload failed: %w", err)
}
// If we haven't received data back from batch upload then fake it
//


@@ -2,6 +2,8 @@ package fichier
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
@@ -10,7 +12,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
@@ -81,7 +82,7 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read file info")
return nil, fmt.Errorf("couldn't read file info: %w", err)
}
return &file, err
@@ -110,7 +111,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
return doretry || !validToken(&token), err
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
return nil, fmt.Errorf("couldn't list files: %w", err)
}
return &token, nil
@@ -144,7 +145,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
return nil, fmt.Errorf("couldn't list files: %w", err)
}
entries = make([]fs.DirEntry, len(sharedFiles))
@@ -173,7 +174,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
return nil, fmt.Errorf("couldn't list files: %w", err)
}
for i := range filesList.Items {
item := &filesList.Items[i]
@@ -201,7 +202,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
return nil, fmt.Errorf("couldn't list folders: %w", err)
}
foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
for i := range foldersList.SubFolders {
@@ -295,7 +296,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
return nil, fmt.Errorf("couldn't create folder: %w", err)
}
// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
@@ -322,10 +323,10 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
return nil, fmt.Errorf("couldn't remove folder: %w", err)
}
if response.Status != "OK" {
return nil, errors.Errorf("can't remove folder: %s", response.Message)
return nil, fmt.Errorf("can't remove folder: %s", response.Message)
}
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -352,7 +353,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove file")
return nil, fmt.Errorf("couldn't remove file: %w", err)
}
// fs.Debugf(f, "Removed file with url `%s`", url)
@@ -379,7 +380,7 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
})
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
return nil, fmt.Errorf("couldn't copy file: %w", err)
}
return response, nil
@@ -404,7 +405,7 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
})
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
return nil, fmt.Errorf("couldn't copy file: %w", err)
}
return response, nil
@@ -432,7 +433,7 @@ func (f *Fs) renameFile(ctx context.Context, url string, newName string) (respon
})
if err != nil {
return nil, errors.Wrap(err, "couldn't rename file")
return nil, fmt.Errorf("couldn't rename file: %w", err)
}
return response, nil
@@ -453,7 +454,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node")
return nil, fmt.Errorf("didnt got an upload node: %w", err)
}
// fs.Debugf(f, "Got Upload node")
@@ -497,7 +498,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
})
if err != nil {
return nil, errors.Wrap(err, "couldn't upload file")
return nil, fmt.Errorf("couldn't upload file: %w", err)
}
// fs.Debugf(f, "Uploaded File `%s`", fileName)
@@ -531,7 +532,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
})
if err != nil {
return nil, errors.Wrap(err, "couldn't finish file upload")
return nil, fmt.Errorf("couldn't finish file upload: %w", err)
}
return response, err


@@ -2,6 +2,7 @@ package fichier
import (
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -9,7 +10,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -42,18 +42,15 @@ func init() {
}, {
Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Help: "If you want to download a shared file that is password protected, add this parameter.",
Name: "file_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
Name: "folder_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
@@ -454,10 +451,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if currentDirectoryID == directoryID {
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't rename file")
return nil, fmt.Errorf("couldn't rename file: %w", err)
}
if resp.Status != "OK" {
return nil, errors.Errorf("couldn't rename file: %s", resp.Message)
return nil, fmt.Errorf("couldn't rename file: %s", resp.Message)
}
url = resp.URLs[0].URL
} else {
@@ -467,10 +464,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
return nil, fmt.Errorf("couldn't move file: %w", err)
}
if resp.Status != "OK" {
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
}
url = resp.URLs[0]
}
@@ -503,10 +500,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
return nil, fmt.Errorf("couldn't move file: %w", err)
}
if resp.Status != "OK" {
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
}
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
@@ -517,6 +514,32 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/user/info.cgi",
ContentType: "application/json",
}
var accountInfo AccountInfo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, nil, &accountInfo)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to read user info: %w", err)
}
// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
Used: fs.NewUsageValue(accountInfo.ColdStorage), // bytes in use
Total: fs.NewUsageValue(accountInfo.AvailableColdStorage), // bytes total
Free: fs.NewUsageValue(accountInfo.AvailableColdStorage - accountInfo.ColdStorage), // bytes free
}
return usage, nil
}
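With About implemented the 1Fichier backend can answer rclone about; hypothetical output (numbers invented):

    $ rclone about fichier:
    Total:  100 GiB
    Used:   25 GiB
    Free:   75 GiB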
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
o, err := f.NewObject(ctx, remote)


@@ -2,11 +2,12 @@ package fichier
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
@@ -122,7 +123,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Delete duplicate after successful upload
err = o.Remove(ctx)
if err != nil {
return errors.Wrap(err, "failed to remove old version")
return fmt.Errorf("failed to remove old version: %w", err)
}
// Replace guts of old object with new one


@@ -182,3 +182,34 @@ type FoldersList struct {
Status string `json:"Status"`
SubFolders []Folder `json:"sub_folders"`
}
// AccountInfo is the structure how 1Fichier returns user info
type AccountInfo struct {
StatsDate string `json:"stats_date"`
MailRM string `json:"mail_rm"`
DefaultQuota int64 `json:"default_quota"`
UploadForbidden string `json:"upload_forbidden"`
PageLimit int `json:"page_limit"`
ColdStorage int64 `json:"cold_storage"`
Status string `json:"status"`
UseCDN string `json:"use_cdn"`
AvailableColdStorage int64 `json:"available_cold_storage"`
DefaultPort string `json:"default_port"`
DefaultDomain int `json:"default_domain"`
Email string `json:"email"`
DownloadMenu string `json:"download_menu"`
FTPDID int `json:"ftp_did"`
DefaultPortFiles string `json:"default_port_files"`
FTPReport string `json:"ftp_report"`
OverQuota int64 `json:"overquota"`
AvailableStorage int64 `json:"available_storage"`
CDN string `json:"cdn"`
Offer string `json:"offer"`
SubscriptionEnd string `json:"subscription_end"`
TFA string `json:"2fa"`
AllowedColdStorage int64 `json:"allowed_cold_storage"`
HotStorage int64 `json:"hot_storage"`
DefaultColdStorageQuota int64 `json:"default_cold_storage_quota"`
FTPMode string `json:"ftp_mode"`
RUReport string `json:"ru_report"`
}


@@ -17,6 +17,7 @@ import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -32,7 +33,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/random"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/filefabric/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -267,7 +267,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, rootID string, path string
"pid": rootID,
}, &resp, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to check path exists")
return nil, fmt.Errorf("failed to check path exists: %w", err)
}
if resp.Exists != "y" {
return nil, fs.ErrorObjectNotFound
@@ -308,7 +308,7 @@ func (f *Fs) getApplianceInfo(ctx context.Context) error {
"token": "*",
}, &applianceInfo, nil)
if err != nil {
return errors.Wrap(err, "failed to read appliance version")
return fmt.Errorf("failed to read appliance version: %w", err)
}
f.opt.Version = applianceInfo.SoftwareVersionLabel
f.m.Set("version", f.opt.Version)
@@ -349,7 +349,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
"authtoken": f.opt.PermanentToken,
}, &info, nil)
if err != nil {
return "", errors.Wrap(err, "failed to get session token")
return "", fmt.Errorf("failed to get session token: %w", err)
}
refreshed = true
now = now.Add(tokenLifeTime)
@@ -562,7 +562,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
"fi_name": f.opt.Enc.FromStandardName(leaf),
}, &info, nil)
if err != nil {
return "", errors.Wrap(err, "failed to create directory")
return "", fmt.Errorf("failed to create directory: %w", err)
}
// fmt.Printf("...Id %q\n", *info.Id)
return info.Item.ID, nil
@@ -595,7 +595,7 @@ OUTER:
var info api.GetFolderContentsResponse
_, err = f.rpc(ctx, "getFolderContents", p, &info, nil)
if err != nil {
return false, errors.Wrap(err, "failed to list directory")
return false, fmt.Errorf("failed to list directory: %w", err)
}
for i := range info.Items {
item := &info.Items[i]
@@ -726,7 +726,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) (err error) {
"completedeletion": "n",
}, &info, nil)
if err != nil {
return errors.Wrap(err, "failed to delete file")
return fmt.Errorf("failed to delete file: %w", err)
}
return nil
}
@@ -763,7 +763,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}, &info, nil)
f.dirCache.FlushDir(dir)
if err != nil {
return errors.Wrap(err, "failed to remove directory")
return fmt.Errorf("failed to remove directory: %w", err)
}
return nil
}
@@ -825,7 +825,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
_, err = f.rpc(ctx, "doCopyFile", p, &info, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to copy file")
return nil, fmt.Errorf("failed to copy file: %w", err)
}
err = dstObj.setMetaData(&info.Item)
if err != nil {
@@ -857,7 +857,7 @@ func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err
"taskid": taskID,
}, &info, nil)
if err != nil {
return errors.Wrapf(err, "failed to wait for task %s to complete", taskID)
return fmt.Errorf("failed to wait for task %s to complete: %w", taskID, err)
}
if len(info.Tasks) == 0 {
// task has finished
@@ -890,7 +890,7 @@ func (f *Fs) renameLeaf(ctx context.Context, isDir bool, id string, newLeaf stri
"fi_name": newLeaf,
}, &info, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to rename leaf")
return nil, fmt.Errorf("failed to rename leaf: %w", err)
}
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
if err != nil {
@@ -934,7 +934,7 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
"dir_id": newDirectoryID,
}, &info, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to move file to new directory")
return nil, fmt.Errorf("failed to move file to new directory: %w", err)
}
item = &info.Item
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
@@ -1037,7 +1037,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
var info api.EmptyResponse
_, err = f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil)
if err != nil {
return errors.Wrap(err, "failed to empty trash")
return fmt.Errorf("failed to empty trash: %w", err)
}
return nil
}
@@ -1164,7 +1164,7 @@ func (o *Object) modifyFile(ctx context.Context, keyValues [][2]string) error {
"data": data.String(),
}, &info, nil)
if err != nil {
return errors.Wrap(err, "failed to update metadata")
return fmt.Errorf("failed to update metadata: %w", err)
}
return o.setMetaData(&info.Item)
}
@@ -1247,7 +1247,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
_, err = o.fs.rpc(ctx, "doInitUpload", p, &upload, nil)
if err != nil {
return errors.Wrap(err, "failed to initialize upload")
return fmt.Errorf("failed to initialize upload: %w", err)
}
// Cancel the upload if aborted or it fails
@@ -1290,13 +1290,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.fs.shouldRetry(ctx, resp, err, nil, try)
})
if err != nil {
return errors.Wrap(err, "failed to upload")
return fmt.Errorf("failed to upload: %w", err)
}
if uploader.Success != "y" {
return errors.Errorf("upload failed")
return fmt.Errorf("upload failed")
}
if size > 0 && uploader.FileSize != size {
return errors.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
return fmt.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
}
// Now finalize the file
@@ -1308,7 +1308,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
_, err = o.fs.rpc(ctx, "doCompleteUpload", p, &finalize, nil)
if err != nil {
return errors.Wrap(err, "failed to finalize upload")
return fmt.Errorf("failed to finalize upload: %w", err)
}
finalized = true


@@ -4,6 +4,8 @@ package ftp
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/textproto"
@@ -13,8 +15,7 @@ import (
"sync"
"time"
"github.com/jlaffaye/ftp"
"github.com/pkg/errors"
"github.com/rclone/ftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -51,16 +52,17 @@ func init() {
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + currentUser + ".",
Name: "user",
Help: "FTP username.",
Default: currentUser,
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21).",
Name: "port",
Help: "FTP port number.",
Default: 21,
}, {
Name: "pass",
Help: "FTP password.",
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: `Use Implicit FTPS (FTP over TLS).
@@ -138,6 +140,14 @@ Enabled by default. Use 0 to disable.`,
Help: "Maximum time to wait for data connection closing status.",
Default: fs.Duration(60 * time.Second),
Advanced: true,
}, {
Name: "ask_password",
Default: false,
Help: `Allow asking for FTP password when needed.
If this is set and no password is supplied then rclone will ask for a password
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
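Assuming rclone's standard option-to-flag mapping, this surfaces as --ftp-ask-password: when it is set and no pass is configured, rclone prompts on the terminal instead of failing, e.g.

    rclone lsd myftp: --ftp-ask-password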
@@ -178,6 +188,7 @@ type Options struct {
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -349,7 +360,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return false, nil
})
if err != nil {
err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
err = fmt.Errorf("failed to make FTP connection to %q: %w", f.dialAddr, err)
}
return c, err
}
@@ -396,8 +407,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
_, isRegularError := errors.Cause(err).(*textproto.Error)
if !isRegularError {
var tpErr *textproto.Error
if !errors.As(err, &tpErr) {
nopErr := c.NoOp()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -443,9 +454,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if err != nil {
return nil, err
}
pass, err := obscure.Reveal(opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "NewFS decrypt password")
pass := ""
if opt.AskPassword && opt.Pass == "" {
pass = config.GetPassword("FTP server password")
} else {
pass, err = obscure.Reveal(opt.Pass)
if err != nil {
return nil, fmt.Errorf("NewFS decrypt password: %w", err)
}
}
user := opt.User
if user == "" {
@@ -502,7 +518,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
// Make a connection and pool it to return errors early
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "NewFs")
return nil, fmt.Errorf("NewFs: %w", err)
}
f.fGetTime = c.IsGetTimeSupported()
f.fSetTime = c.IsSetTimeSupported()
@@ -520,7 +536,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
}
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return old f
f.root = root
return f, nil
@@ -599,7 +615,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "findItem")
return nil, fmt.Errorf("findItem: %w", err)
}
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
@@ -643,7 +659,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, remote)
if err != nil {
return false, errors.Wrap(err, "dirExists")
return false, fmt.Errorf("dirExists: %w", err)
}
if entry != nil && entry.Type == ftp.EntryTypeFolder {
return true, nil
@@ -664,7 +680,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "list")
return nil, fmt.Errorf("list: %w", err)
}
var listErr error
@@ -702,7 +718,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if len(files) == 0 {
exists, err := f.dirExists(ctx, dir)
if err != nil {
return nil, errors.Wrap(err, "list")
return nil, fmt.Errorf("list: %w", err)
}
if !exists {
return nil, fs.ErrorDirNotFound
@@ -766,7 +782,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(ctx, src.Remote())
if err != nil {
return nil, errors.Wrap(err, "Put mkParentDir failed")
return nil, fmt.Errorf("Put mkParentDir failed: %w", err)
}
o := &Object{
fs: f,
@@ -789,7 +805,7 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "getInfo")
return nil, fmt.Errorf("getInfo: %w", err)
}
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
@@ -827,7 +843,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound {
return errors.Wrapf(err, "mkdir %q failed", abspath)
return fmt.Errorf("mkdir %q failed: %w", abspath, err)
}
parent := path.Dir(abspath)
err = f.mkdir(ctx, parent)
@@ -836,7 +852,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
c, connErr := f.getFtpConnection(ctx)
if connErr != nil {
return errors.Wrap(connErr, "mkdir")
return fmt.Errorf("mkdir: %w", connErr)
}
err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
@@ -872,7 +888,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
c, err := f.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
return fmt.Errorf("Rmdir: %w", translateErrorFile(err))
}
err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err)
@@ -888,11 +904,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err := f.mkParentDir(ctx, remote)
if err != nil {
return nil, errors.Wrap(err, "Move mkParentDir failed")
return nil, fmt.Errorf("Move mkParentDir failed: %w", err)
}
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "Move")
return nil, fmt.Errorf("Move: %w", err)
}
err = c.Rename(
f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
@@ -900,11 +916,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
)
f.putFtpConnection(&c, err)
if err != nil {
return nil, errors.Wrap(err, "Move Rename failed")
return nil, fmt.Errorf("Move Rename failed: %w", err)
}
dstObj, err := f.NewObject(ctx, remote)
if err != nil {
return nil, errors.Wrap(err, "Move NewObject failed")
return nil, fmt.Errorf("Move NewObject failed: %w", err)
}
return dstObj, nil
}
@@ -934,19 +950,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound {
return errors.Wrapf(err, "DirMove getInfo failed")
return fmt.Errorf("DirMove getInfo failed: %w", err)
}
// Make sure the parent directory exists
err = f.mkdir(ctx, path.Dir(dstPath))
if err != nil {
return errors.Wrap(err, "DirMove mkParentDir dst failed")
return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
}
// Do the move
c, err := f.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "DirMove")
return fmt.Errorf("DirMove: %w", err)
}
err = c.Rename(
f.dirFromStandardPath(srcPath),
@@ -954,7 +970,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
)
f.putFtpConnection(&c, err)
if err != nil {
return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
return fmt.Errorf("DirMove Rename(%q,%q) failed: %w", srcPath, dstPath, err)
}
return nil
}
@@ -1111,12 +1127,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "open")
return nil, fmt.Errorf("open: %w", err)
}
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open")
return nil, fmt.Errorf("open: %w", err)
}
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil
@@ -1146,7 +1162,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Update")
return fmt.Errorf("Update: %w", err)
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - send by some servers
@@ -1164,15 +1180,15 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// recycle connection in advance to let remove() find free token
o.fs.putFtpConnection(nil, err)
remove()
return errors.Wrap(err, "update stor")
return fmt.Errorf("update stor: %w", err)
}
o.fs.putFtpConnection(&c, nil)
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
return errors.Wrap(err, "SetModTime")
return fmt.Errorf("SetModTime: %w", err)
}
o.info, err = o.fs.getInfo(ctx, path)
if err != nil {
return errors.Wrap(err, "update getinfo")
return fmt.Errorf("update getinfo: %w", err)
}
return nil
}
@@ -1191,7 +1207,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
} else {
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Remove")
return fmt.Errorf("Remove: %w", err)
}
err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
o.fs.putFtpConnection(&c, err)


@@ -36,7 +36,7 @@ func (f *Fs) testUploadTimeout(t *testing.T) {
const (
fileSize = 100000000 // 100 MiB
idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 5 * time.Second // prevent test hangup
maxTime = 10 * time.Second // prevent test hangup
)
if testing.Short() {


@@ -16,6 +16,7 @@ import (
"context"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -25,7 +26,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -65,7 +65,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
RedirectURL: oauthutil.RedirectURL,
}
)
@@ -182,15 +182,30 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "asia-northeast1",
Help: "Tokyo",
}, {
Value: "asia-northeast2",
Help: "Osaka",
}, {
Value: "asia-northeast3",
Help: "Seoul",
}, {
Value: "asia-south1",
Help: "Mumbai",
}, {
Value: "asia-south2",
Help: "Delhi",
}, {
Value: "asia-southeast1",
Help: "Singapore",
}, {
Value: "asia-southeast2",
Help: "Jakarta",
}, {
Value: "australia-southeast1",
Help: "Sydney",
}, {
Value: "australia-southeast2",
Help: "Melbourne",
}, {
Value: "europe-north1",
Help: "Finland",
@@ -206,6 +221,12 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "europe-west4",
Help: "Netherlands",
}, {
Value: "europe-west6",
Help: "Zürich",
}, {
Value: "europe-central2",
Help: "Warsaw",
}, {
Value: "us-central1",
Help: "Iowa",
@@ -221,6 +242,33 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "us-west2",
Help: "California",
}, {
Value: "us-west3",
Help: "Salt Lake City",
}, {
Value: "us-west4",
Help: "Las Vegas",
}, {
Value: "northamerica-northeast1",
Help: "Montréal",
}, {
Value: "northamerica-northeast2",
Help: "Toronto",
}, {
Value: "southamerica-east1",
Help: "São Paulo",
}, {
Value: "southamerica-west1",
Help: "Santiago",
}, {
Value: "asia1",
Help: "Dual region: asia-northeast1 and asia-northeast2.",
}, {
Value: "eur4",
Help: "Dual region: europe-north1 and europe-west4.",
}, {
Value: "nam4",
Help: "Dual region: us-central1 and us-east1.",
}},
}, {
Name: "storage_class",
@@ -247,6 +295,15 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}, {
Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it.
This can be useful when trying to minimise the number of transactions
rclone does if you know the bucket exists already.
`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
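A config sketch for skipping the bucket check (section name invented):

    [gcs]
    type = google cloud storage
    no_check_bucket = true

This saves a bucket-existence transaction per operation and helps when the bucket is known to exist but the credential in use cannot read or create buckets.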
@@ -269,6 +326,7 @@ type Options struct {
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
NoCheckBucket bool `config:"no_check_bucket"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -375,7 +433,7 @@ func (o *Object) split() (bucket, bucketPath string) {
func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
return nil, fmt.Errorf("error processing credentials: %w", err)
}
ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
@@ -408,7 +466,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
opt.ServiceAccountCredentials = string(loadedCreds)
}
@@ -417,7 +475,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} else if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
@@ -425,7 +483,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ctx := context.Background()
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
}
}
@@ -434,7 +492,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
}
f.setRoot(root)
@@ -449,7 +507,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.client = oAuthClient
f.svc, err = storage.New(f.client)
if err != nil {
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
}
if f.rootBucket != "" && f.rootDirectory != "" {
@@ -759,10 +817,10 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
return fmt.Errorf("failed to get bucket: %w", err)
}
} else {
return errors.Wrap(err, "failed to get bucket")
return fmt.Errorf("failed to get bucket: %w", err)
}
if f.opt.ProjectNumber == "" {
@@ -792,6 +850,14 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
}, nil)
}
// checkBucket creates the bucket if it doesn't exist unless NoCheckBucket is true
func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
if f.opt.NoCheckBucket {
return nil
}
return f.makeBucket(ctx, bucket)
}
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
@@ -825,7 +891,7 @@ func (f *Fs) Precision() time.Duration {
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
err := f.checkBucket(ctx, dstBucket)
if err != nil {
return nil, err
}
@@ -1065,7 +1131,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
_, isRanging := req.Header["Range"]
if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
_ = res.Body.Close() // ignore error
return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
return nil, fmt.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
}
return res.Body, nil
}
@@ -1075,7 +1141,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
err := o.fs.makeBucket(ctx, bucket)
err := o.fs.checkBucket(ctx, bucket)
if err != nil {
return err
}


@@ -6,6 +6,7 @@ package googlephotos
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@@ -17,7 +18,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -69,7 +69,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
RedirectURL: oauthutil.RedirectURL,
}
)
@@ -85,7 +85,7 @@ func init() {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, errors.Wrap(err, "couldn't parse config into struct")
return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
}
switch config.State {
@@ -139,7 +139,7 @@ you want to read the media.`,
Default: false,
Help: `Also view and download archived media.
By default rclone does not request archived media. Thus, when syncing,
By default, rclone does not request archived media. Thus, when syncing,
archived media is not visible in directory listings or transferred.
Note that media in albums is always visible and synced, no matter
@@ -292,7 +292,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
baseClient := fshttp.NewClient(ctx)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
return nil, fmt.Errorf("failed to configure Box: %w", err)
}
root = strings.Trim(path.Clean(root), "/")
@@ -345,13 +345,13 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't read openID config")
return "", fmt.Errorf("couldn't read openID config: %w", err)
}
// Find userinfo endpoint
endpoint, ok := openIDconfig[name].(string)
if !ok {
return "", errors.Errorf("couldn't find %q from openID config", name)
return "", fmt.Errorf("couldn't find %q from openID config", name)
}
return endpoint, nil
@@ -374,7 +374,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read user info")
return nil, fmt.Errorf("couldn't read user info: %w", err)
}
return userInfo, nil
}
@@ -405,7 +405,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't revoke token")
return fmt.Errorf("couldn't revoke token: %w", err)
}
fs.Infof(f, "res = %+v", res)
return nil
@@ -492,7 +492,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list albums")
return nil, fmt.Errorf("couldn't list albums: %w", err)
}
newAlbums := result.Albums
if shared {
@@ -549,7 +549,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list files")
return fmt.Errorf("couldn't list files: %w", err)
}
items := result.MediaItems
if len(items) > 0 && items[0].ID == lastID {
@@ -693,7 +693,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create album")
return nil, fmt.Errorf("couldn't create album: %w", err)
}
f.albums[false].add(&result)
return &result, nil
@@ -879,7 +879,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't get media item")
return fmt.Errorf("couldn't get media item: %w", err)
}
o.setMetaData(&item)
return nil
@@ -1014,7 +1014,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't upload file")
return fmt.Errorf("couldn't upload file: %w", err)
}
uploadToken := strings.TrimSpace(string(token))
if uploadToken == "" {
@@ -1042,14 +1042,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create media item")
return fmt.Errorf("failed to create media item: %w", err)
}
if len(result.NewMediaItemResults) != 1 {
return errors.New("bad response to BatchCreate wrong number of items")
}
mediaItemResult := result.NewMediaItemResults[0]
if mediaItemResult.Status.Code != 0 {
return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
}
o.setMetaData(&mediaItemResult.MediaItem)
@@ -1071,7 +1071,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
albumTitle, fileName := match[1], match[2]
album, ok := o.fs.albums[false].get(albumTitle)
if !ok {
return errors.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
return fmt.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
}
opts := rest.Opts{
Method: "POST",
@@ -1087,7 +1087,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't delete item from album")
return fmt.Errorf("couldn't delete item from album: %w", err)
}
return nil
}

View File

@@ -11,7 +11,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
)
@@ -270,7 +269,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
year := match[1]
current, err := time.Parse("2006", year)
if err != nil {
return nil, errors.Errorf("bad year %q", match[1])
return nil, fmt.Errorf("bad year %q", match[1])
}
currentYear := current.Year()
for current.Year() == currentYear {
@@ -284,7 +283,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
year, err := strconv.Atoi(match[1])
if err != nil || year < 1000 || year > 3000 {
return sf, errors.Errorf("bad year %q", match[1])
return sf, fmt.Errorf("bad year %q", match[1])
}
sf = api.SearchFilter{
Filters: &api.Filters{
@@ -300,14 +299,14 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
if len(match) >= 3 {
month, err := strconv.Atoi(match[2])
if err != nil || month < 1 || month > 12 {
return sf, errors.Errorf("bad month %q", match[2])
return sf, fmt.Errorf("bad month %q", match[2])
}
sf.Filters.DateFilter.Dates[0].Month = month
}
if len(match) >= 4 {
day, err := strconv.Atoi(match[3])
if err != nil || day < 1 || day > 31 {
return sf, errors.Errorf("bad day %q", match[3])
return sf, fmt.Errorf("bad day %q", match[3])
}
sf.Filters.DateFilter.Dates[0].Day = day
}

View File

@@ -2,9 +2,10 @@ package hasher
import (
"context"
"errors"
"fmt"
"path"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -118,18 +119,18 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
case fs.ErrorIsFile:
// ok
case nil:
return errors.Errorf("not a file: %s", sumRemote)
return fmt.Errorf("not a file: %s", sumRemote)
default:
return err
}
sumObj, err := sumFs.NewObject(ctx, path.Base(sumPath))
if err != nil {
return errors.Wrap(err, "cannot open sum file")
return fmt.Errorf("cannot open sum file: %w", err)
}
hashes, err := operations.ParseSumFile(ctx, sumObj)
if err != nil {
return errors.Wrap(err, "failed to parse sum file")
return fmt.Errorf("failed to parse sum file: %w", err)
}
if sticky {

View File

@@ -4,6 +4,7 @@ package hasher
import (
"context"
"encoding/gob"
"errors"
"fmt"
"io"
"path"
@@ -11,7 +12,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
@@ -102,7 +102,7 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
remotePath := fspath.JoinRootPath(opt.Remote, rpath)
baseFs, err := cache.Get(ctx, remotePath)
if err != nil && err != fs.ErrorIsFile {
return nil, errors.Wrapf(err, "failed to derive base remote %q", opt.Remote)
return nil, fmt.Errorf("failed to derive base remote %q: %w", opt.Remote, err)
}
f := &Fs{
@@ -127,7 +127,7 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
for _, hashName := range opt.Hashes {
var ht hash.Type
if err := ht.Set(hashName); err != nil {
return nil, errors.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
return nil, fmt.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
}
if !f.slowHashes.Contains(ht) {
f.autoHashes.Add(ht)
@@ -202,7 +202,11 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
for _, entry := range baseEntries {
switch x := entry.(type) {
case fs.Object:
hashEntries = append(hashEntries, f.wrapObject(x, nil))
obj, err := f.wrapObject(x, nil)
if err != nil {
return nil, err
}
hashEntries = append(hashEntries, obj)
default:
hashEntries = append(hashEntries, entry) // trash in - trash out
}
@@ -251,7 +255,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if do := f.Fs.Features().PutStream; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err), err
return f.wrapObject(oResult, err)
}
return nil, errors.New("PutStream not supported")
}
@@ -261,7 +265,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if do := f.Fs.Features().PutUnchecked; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err), err
return f.wrapObject(oResult, err)
}
return nil, errors.New("PutUnchecked not supported")
}
@@ -348,7 +352,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantCopy
}
oResult, err := do(ctx, o.Object, remote)
return f.wrapObject(oResult, err), err
return f.wrapObject(oResult, err)
}
// Move src to this remote using server-side move operations.
@@ -371,7 +375,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
dir: false,
fs: f,
})
return f.wrapObject(oResult, nil), nil
return f.wrapObject(oResult, nil)
}
// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
@@ -410,7 +414,7 @@ func (f *Fs) Shutdown(ctx context.Context) (err error) {
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(ctx, remote)
return f.wrapObject(o, err), err
return f.wrapObject(o, err)
}
//
@@ -424,11 +428,15 @@ type Object struct {
}
// Wrap base object into hasher object
func (f *Fs) wrapObject(o fs.Object, err error) *Object {
if err != nil || o == nil {
return nil
func (f *Fs) wrapObject(o fs.Object, err error) (obj fs.Object, outErr error) {
// log.Trace(o, "err=%v", err)("obj=%#v, outErr=%v", &obj, &outErr)
if err != nil {
return nil, err
}
return &Object{Object: o, f: f}
if o == nil {
return nil, fs.ErrorObjectNotFound
}
return &Object{Object: o, f: f}, nil
}
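
The signature change above, returning (fs.Object, error) instead of a possibly-nil *Object, sidesteps Go's typed-nil interface trap. A minimal standalone sketch of that trap, with made-up types:

package main

import "fmt"

type Object struct{}

// wrap returns the concrete pointer through an interface, the shape of
// the old wrapObject signature.
func wrap(o *Object) interface{} { return o }

func main() {
    var o *Object // nil pointer
    e := wrap(o)
    // The interface is non-nil because it carries the *Object type,
    // even though the pointer inside it is nil, so `e == nil` checks
    // in callers silently pass a broken object along.
    fmt.Println(e == nil) // false
}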
// Fs returns read only access to the Fs that this object is part of

View File

@@ -4,11 +4,11 @@ import (
"bytes"
"context"
"encoding/gob"
"errors"
"fmt"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
@@ -199,10 +199,10 @@ func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
r.Hashes[hashType] = hashVal
}
if data, err = r.encode(op.key); err != nil {
return errors.Wrap(err, "marshal failed")
return fmt.Errorf("marshal failed: %w", err)
}
if err = b.Put([]byte(op.key), data); err != nil {
return errors.Wrap(err, "put failed")
return fmt.Errorf("put failed: %w", err)
}
return err
}
@@ -294,7 +294,7 @@ func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) strin
if hashVal == "" || err != nil {
hashVal = "-"
}
hashVal = fmt.Sprintf("%-*s", hash.Width(hashType), hashVal)
hashVal = fmt.Sprintf("%-*s", hash.Width(hashType, false), hashVal)
hashes = append(hashes, hashName+":"+hashVal)
}
hashesStr := strings.Join(hashes, " ")

View File

@@ -2,13 +2,13 @@ package hasher
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"path"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
@@ -184,7 +184,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadC
// Put data into the remote path with given modTime and size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
var (
o *Object
o fs.Object
common hash.Set
rehash bool
hashes hashMap
@@ -210,8 +210,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
_ = f.pruneHash(src.Remote())
oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
o = f.wrapObject(oResult, err)
if o == nil {
o, err = f.wrapObject(oResult, err)
if err != nil {
return nil, err
}
@@ -224,7 +224,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
}
}
if len(hashes) > 0 {
err := o.putHashes(ctx, hashes)
err := o.(*Object).putHashes(ctx, hashes)
fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
}
return o, err

View File

@@ -263,6 +263,98 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.client.RemoveAll(realpath)
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Get the real paths from the remote specs:
sourcePath := srcObj.fs.realpath(srcObj.remote)
targetPath := f.realpath(remote)
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
// Make sure the target folder exists:
dirname := path.Dir(targetPath)
err := f.client.MkdirAll(dirname, 0755)
if err != nil {
return nil, err
}
// Do the move
// Note that the underlying HDFS library hard-codes Overwrite=True, but this is expected rclone behaviour.
err = f.client.Rename(sourcePath, targetPath)
if err != nil {
return nil, err
}
// Look up the resulting object
info, err := f.client.Stat(targetPath)
if err != nil {
return nil, err
}
// And return it:
return &Object{
fs: f,
remote: remote,
size: info.Size(),
modTime: info.ModTime(),
}, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
srcFs, ok := src.(*Fs)
if !ok {
return fs.ErrorCantDirMove
}
// Get the real paths from the remote specs:
sourcePath := srcFs.realpath(srcRemote)
targetPath := f.realpath(dstRemote)
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
// Check if the destination exists:
info, err := f.client.Stat(targetPath)
if err == nil {
fs.Debugf(f, "target directory already exits, IsDir = [%t]", info.IsDir())
return fs.ErrorDirExists
}
// Make sure the target's parent folder exists:
dirname := path.Dir(targetPath)
err = f.client.MkdirAll(dirname, 0755)
if err != nil {
return err
}
// Do the move
err = f.client.Rename(sourcePath, targetPath)
if err != nil {
return err
}
return nil
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := f.client.StatFs()
@@ -318,4 +410,6 @@ var (
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
)
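
The two added `var _` lines are Go's usual compile-time interface assertions: zero runtime cost, but the build breaks if *Fs ever stops implementing the optional interface. A minimal standalone sketch of the idiom, with made-up types:

package main

type Mover interface {
    Move(src, dst string) error
}

type Fs struct{}

func (f *Fs) Move(src, dst string) error { return nil }

// Deleting the Move method above turns this line into a compile error,
// which is the whole point of the assertion.
var _ Mover = (*Fs)(nil)

func main() {}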

View File

@@ -22,9 +22,8 @@ func init() {
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
}, {
Name: "username",
Help: "Hadoop user name.",
Required: false,
Name: "username",
Help: "Hadoop user name.",
Examples: []fs.OptionExample{{
Value: "root",
Help: "Connect to hdfs as root.",
@@ -36,7 +35,6 @@ func init() {
Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Required: false,
Advanced: true,
}, {
Name: "data_transfer_protection",
@@ -46,7 +44,6 @@ Specifies whether or not authentication, data signature integrity
checks, and wire encryption is required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.",

View File

@@ -6,6 +6,8 @@ package http
import (
"context"
"errors"
"fmt"
"io"
"mime"
"net/http"
@@ -16,7 +18,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -49,10 +50,9 @@ Use this to set additional HTTP headers for all transactions.
The input format is comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.`,
Default: fs.CommaSepList{},
Advanced: true,
}, {
@@ -73,8 +73,9 @@ directories.`,
Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests to find file sizes in dir listing.
Help: `Don't use HEAD requests.
HEAD requests are mainly used to find file sizes in dir listing.
If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:
@@ -83,12 +84,9 @@ directory listing to:
- check it really exists
- check to see if it is a directory
If you set this option, rclone will not do the HEAD request. This will mean
- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
If you set this option, rclone will not do the HEAD request. This will mean
that directory listings are much quicker, but rclone won't have the times or
sizes of any files, and some files that don't exist may be in the listing.`,
Default: false,
Advanced: true,
}},
@@ -132,11 +130,87 @@ func statusError(res *http.Response, err error) error {
}
if res.StatusCode < 200 || res.StatusCode > 299 {
_ = res.Body.Close()
return errors.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
return fmt.Errorf("HTTP Error: %s", res.Status)
}
return nil
}
// getFsEndpoint decides if url is to be considered a file or directory,
// and returns a proper endpoint url to use for the fs.
func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Options) (string, bool) {
// If url ends with '/' it is already a proper url, always assumed to be a directory.
if url[len(url)-1] == '/' {
return url, false
}
// If url does not end with '/' we send a HEAD request to decide
// whether it is a directory or a file; for a directory we append the
// missing '/', and for a file we return the url of its parent directory.
createFileResult := func() (string, bool) {
fs.Debugf(nil, "If path is a directory you must add a trailing '/'")
parent, _ := path.Split(url)
return parent, true
}
createDirResult := func() (string, bool) {
fs.Debugf(nil, "To avoid the initial HEAD request add a trailing '/' to the path")
return url + "/", false
}
// If HEAD requests are not allowed we just have to assume it is a file.
if opt.NoHead {
fs.Debugf(nil, "Assuming path is a file as --http-no-head is set")
return createFileResult()
}
// Use a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be created: %v", err)
return createFileResult()
}
addHeaders(req, opt)
res, err := noRedir.Do(req)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
return createFileResult()
}
if res.StatusCode == http.StatusNotFound {
fs.Debugf(nil, "Assuming path is a directory as HEAD response is it does not exist as a file (%s)", res.Status)
return createDirResult()
}
if res.StatusCode == http.StatusMovedPermanently ||
res.StatusCode == http.StatusFound ||
res.StatusCode == http.StatusSeeOther ||
res.StatusCode == http.StatusTemporaryRedirect ||
res.StatusCode == http.StatusPermanentRedirect {
redir := res.Header.Get("Location")
if redir != "" {
if redir[len(redir)-1] == '/' {
fs.Debugf(nil, "Assuming path is a directory as HEAD response is redirect (%s) to a path that ends with '/': %s", res.Status, redir)
return createDirResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) to a path that does not end with '/': %s", res.Status, redir)
return createFileResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) but no location header", res.Status)
return createFileResult()
}
if res.StatusCode < 200 || res.StatusCode > 299 {
// Example is 403 (http.StatusForbidden) for servers not allowing HEAD requests.
fs.Debugf(nil, "Assuming path is a file as HEAD response is an error (%s)", res.Status)
return createFileResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is success (%s)", res.Status)
return createFileResult()
}
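
A minimal standalone sketch of the no-redirect client trick used above (the URL is a placeholder): copying the http.Client by value and overriding CheckRedirect with http.ErrUseLastResponse makes Do return the 3xx response itself, so the Location header can be inspected instead of followed.

package main

import (
    "fmt"
    "net/http"
)

func main() {
    noRedir := *http.DefaultClient // shallow copy; the original is untouched
    noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
        return http.ErrUseLastResponse // hand back the 3xx instead of following it
    }
    res, err := noRedir.Head("http://example.com/dir") // placeholder URL
    if err != nil {
        fmt.Println("HEAD failed:", err)
        return
    }
    defer res.Body.Close()
    fmt.Println(res.StatusCode, res.Header.Get("Location"))
}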
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -167,37 +241,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
client := fshttp.NewClient(ctx)
var isFile = false
if !strings.HasSuffix(u.String(), "/") {
// Make a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
// check to see if points to a file
req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
if err == nil {
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
if err == nil {
isFile = true
}
}
}
newRoot := u.String()
if isFile {
// Point to the parent if this is a file
newRoot, _ = path.Split(u.String())
} else {
if !strings.HasSuffix(newRoot, "/") {
newRoot += "/"
}
}
u, err = url.Parse(newRoot)
endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
fs.Debugf(nil, "Root: %s", endpoint)
u, err = url.Parse(endpoint)
if err != nil {
return nil, err
}
@@ -215,12 +261,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
if isFile {
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
if !strings.HasSuffix(f.endpointURL, "/") {
return nil, errors.New("internal error: url doesn't end with /")
}
return f, nil
}
@@ -296,7 +346,7 @@ func parseName(base *url.URL, name string) (string, error) {
}
// check it doesn't have URL parameters
uStr := u.String()
if strings.Index(uStr, "?") >= 0 {
if strings.Contains(uStr, "?") {
return "", errFoundQuestionMark
}
// check that this is going back to the same host and scheme
@@ -377,15 +427,15 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
URL := f.url(dir)
u, err := url.Parse(URL)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
return nil, fmt.Errorf("failed to readDir: %w", err)
}
if !strings.HasSuffix(URL, "/") {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
return nil, fmt.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
// Do the request
req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
return nil, fmt.Errorf("readDir failed: %w", err)
}
f.addHeaders(req)
res, err := f.httpClient.Do(req)
@@ -397,7 +447,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
}
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
return nil, fmt.Errorf("failed to readDir: %w", err)
}
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
@@ -405,10 +455,10 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
case "text/html":
names, err = parse(u, res.Body)
if err != nil {
return nil, errors.Wrap(err, "readDir")
return nil, fmt.Errorf("readDir: %w", err)
}
default:
return nil, errors.Errorf("Can't parse content type %q", contentType)
return nil, fmt.Errorf("can't parse content type %q", contentType)
}
return names, nil
}
@@ -428,7 +478,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
names, err := f.readDir(ctx, dir)
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
return nil, fmt.Errorf("error listing %q: %w", dir, err)
}
var (
entriesMu sync.Mutex // to protect entries
@@ -540,7 +590,7 @@ func (o *Object) stat(ctx context.Context) error {
url := o.url()
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
return fmt.Errorf("stat failed: %w", err)
}
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
@@ -549,7 +599,7 @@ func (o *Object) stat(ctx context.Context) error {
}
err = statusError(res, err)
if err != nil {
return errors.Wrap(err, "failed to stat")
return fmt.Errorf("failed to stat: %w", err)
}
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
@@ -562,7 +612,7 @@ func (o *Object) stat(ctx context.Context) error {
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
if err != nil {
return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
return fmt.Errorf("failed to parse Content-Type: %q: %w", o.contentType, err)
}
if mediaType == "text/html" {
return fs.ErrorNotAFile
@@ -588,7 +638,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
url := o.url()
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
return nil, fmt.Errorf("Open failed: %w", err)
}
// Add optional headers
@@ -601,7 +651,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
res, err := o.fs.httpClient.Do(req)
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
return nil, fmt.Errorf("Open failed: %w", err)
}
return res.Body, nil
}

View File

@@ -8,8 +8,10 @@ import (
"net/http/httptest"
"net/url"
"os"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"testing"
"time"
@@ -24,10 +26,11 @@ import (
)
var (
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
lineEndSize = 1
)
// prepareServer the test server and return a function to tidy it up afterwards
@@ -35,6 +38,22 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// verify the file path is correct, and also check which line endings
// are used to get sizes right ("\n" except on Windows, but even there
// we may have "\n" or "\r\n" depending on git crlf setting)
fileList, err := ioutil.ReadDir(filesPath)
require.NoError(t, err)
require.Greater(t, len(fileList), 0)
for _, file := range fileList {
if !file.IsDir() {
data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
if strings.HasSuffix(string(data), "\r\n") {
lineEndSize = 2
}
break
}
}
// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
@@ -91,7 +110,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
e = entries[1]
assert.Equal(t, "one%.txt", e.Remote())
assert.Equal(t, int64(6), e.Size())
assert.Equal(t, int64(5+lineEndSize), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
@@ -108,7 +127,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
_, ok = e.(fs.Directory)
assert.True(t, ok)
} else {
assert.Equal(t, int64(41), e.Size())
assert.Equal(t, int64(40+lineEndSize), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
@@ -141,7 +160,7 @@ func TestListSubDir(t *testing.T) {
e := entries[0]
assert.Equal(t, "three/underthree.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
assert.Equal(t, int64(8+lineEndSize), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
@@ -154,7 +173,7 @@ func TestNewObject(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "four/under four.txt", o.Remote())
assert.Equal(t, int64(9), o.Size())
assert.Equal(t, int64(8+lineEndSize), o.Size())
_, ok := o.(*Object)
assert.True(t, ok)
@@ -187,7 +206,11 @@ func TestOpen(t *testing.T) {
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "beetroot\n", string(data))
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}
// Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
@@ -236,7 +259,7 @@ func TestIsAFileSubDir(t *testing.T) {
e := entries[0]
assert.Equal(t, "underthree.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
assert.Equal(t, int64(8+lineEndSize), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
@@ -353,3 +376,106 @@ func TestParseCaddy(t *testing.T) {
"v1.36-22-g06ea13a-ssh-agentβ/",
})
}
func TestFsNoSlashRoots(t *testing.T) {
// Test Fs with roots that do not end with '/', exercising the logic
// that decides if a url is to be considered a file or a directory,
// based on the result of a HEAD request.
// Handler for faking HEAD responses with different status codes
headCount := 0
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "HEAD" {
headCount++
responseCode, err := strconv.Atoi(path.Base(r.URL.String()))
require.NoError(t, err)
if strings.HasPrefix(r.URL.String(), "/redirect/") {
var redir string
if strings.HasPrefix(r.URL.String(), "/redirect/file/") {
redir = "/redirected"
} else if strings.HasPrefix(r.URL.String(), "/redirect/dir/") {
redir = "/redirected/"
} else {
require.Fail(t, "Redirect test requests must start with '/redirect/file/' or '/redirect/dir/'")
}
http.Redirect(w, r, redir, responseCode)
} else {
http.Error(w, http.StatusText(responseCode), responseCode)
}
}
})
// Make the test server
ts := httptest.NewServer(handler)
defer ts.Close()
// Configure the remote
configfile.Install()
m := configmap.Simple{
"type": "http",
"url": ts.URL,
}
// Test
for i, test := range []struct {
root string
isFile bool
}{
// 2xx success
{"parent/200", true},
{"parent/204", true},
// 3xx redirection Redirect status 301, 302, 303, 307, 308
{"redirect/file/301", true}, // Request is redirected to "/redirected"
{"redirect/dir/301", false}, // Request is redirected to "/redirected/"
{"redirect/file/302", true}, // Request is redirected to "/redirected"
{"redirect/dir/302", false}, // Request is redirected to "/redirected/"
{"redirect/file/303", true}, // Request is redirected to "/redirected"
{"redirect/dir/303", false}, // Request is redirected to "/redirected/"
{"redirect/file/304", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/305", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/306", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/307", true}, // Request is redirected to "/redirected"
{"redirect/dir/307", false}, // Request is redirected to "/redirected/"
{"redirect/file/308", true}, // Request is redirected to "/redirected"
{"redirect/dir/308", false}, // Request is redirected to "/redirected/"
// 4xx client errors
{"parent/403", true}, // Forbidden status (head request blocked)
{"parent/404", false}, // Not found status
} {
for _, noHead := range []bool{false, true} {
var isFile bool
if noHead {
m.Set("no_head", "true")
isFile = true
} else {
m.Set("no_head", "false")
isFile = test.isFile
}
headCount = 0
f, err := NewFs(context.Background(), remoteName, test.root, m)
if noHead {
assert.Equal(t, 0, headCount)
} else {
assert.Equal(t, 1, headCount)
}
if isFile {
assert.ErrorIs(t, err, fs.ErrorIsFile)
} else {
assert.NoError(t, err)
}
var endpoint string
if isFile {
parent, _ := path.Split(test.root)
endpoint = "/" + parent
} else {
endpoint = "/" + test.root + "/"
}
what := fmt.Sprintf("i=%d, root=%q, isFile=%v, noHead=%v", i, test.root, isFile, noHead)
assert.Equal(t, ts.URL+endpoint, f.String(), what)
}
}
}

View File

@@ -9,6 +9,7 @@ package hubic
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
@@ -16,7 +17,6 @@ import (
"time"
swiftLib "github.com/ncw/swift/v2"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
@@ -120,7 +120,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
}
decoder := json.NewDecoder(resp.Body)
var result credentials
@@ -146,7 +146,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Hubic")
return nil, fmt.Errorf("failed to configure Hubic: %w", err)
}
f := &Fs{
@@ -163,7 +163,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = c.Authenticate(ctx)
if err != nil {
return nil, errors.Wrap(err, "error authenticating swift connection")
return nil, fmt.Errorf("error authenticating swift connection: %w", err)
}
// Parse config into swift.Options struct

View File

@@ -2,10 +2,9 @@ package api
import (
"encoding/xml"
"errors"
"fmt"
"time"
"github.com/pkg/errors"
)
const (

View File

@@ -7,6 +7,8 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -19,7 +21,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/jottacloud/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -69,6 +70,10 @@ const (
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaCloudClientID = "desktop"
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
tele2CloudClientID = "desktop"
)
// Register with Fs
@@ -131,6 +136,9 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
}, {
Value: "telia",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
}, {
Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
}})
case "auth_type_done":
// Jump to next state according to config chosen
@@ -146,12 +154,12 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
srv := rest.NewClient(fshttp.NewClient(ctx))
token, tokenEndpoint, err := doTokenAuth(ctx, srv, loginToken)
if err != nil {
return nil, errors.Wrap(err, "failed to get oauth token")
return nil, fmt.Errorf("failed to get oauth token: %w", err)
}
m.Set(configTokenURL, tokenEndpoint)
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
return nil, errors.Wrap(err, "error while saving token")
return nil, fmt.Errorf("error while saving token: %w", err)
}
return fs.ConfigGoto("choose_device")
case "legacy": // configure a jottacloud backend using legacy authentication
@@ -168,7 +176,7 @@ machines.`)
if config.Result == "true" {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
return nil, errors.Wrap(err, "failed to register device")
return nil, fmt.Errorf("failed to register device: %w", err)
}
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
@@ -216,11 +224,11 @@ machines.`)
m.Set("password", "")
m.Set("auth_code", "")
if err != nil {
return nil, errors.Wrap(err, "failed to get oauth token")
return nil, fmt.Errorf("failed to get oauth token: %w", err)
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
return nil, errors.Wrap(err, "error while saving token")
return nil, fmt.Errorf("error while saving token: %w", err)
}
return fs.ConfigGoto("choose_device")
case "telia": // telia cloud config
@@ -238,6 +246,21 @@ machines.`)
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "tele2": // tele2 cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, tele2CloudClientID)
m.Set(configTokenURL, tele2CloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: tele2CloudAuthURL,
TokenURL: tele2CloudTokenURL,
},
ClientID: tele2CloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
case "choose_device_query":
@@ -529,7 +552,7 @@ func getCustomerInfo(ctx context.Context, apiSrv *rest.Client) (info *api.Custom
_, err = apiSrv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get customer info")
return nil, fmt.Errorf("couldn't get customer info: %w", err)
}
return info, nil
@@ -544,7 +567,7 @@ func getDriveInfo(ctx context.Context, srv *rest.Client, username string) (info
_, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get drive info")
return nil, fmt.Errorf("couldn't get drive info: %w", err)
}
return info, nil
@@ -559,7 +582,7 @@ func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *ap
_, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get device info")
return nil, fmt.Errorf("couldn't get device info: %w", err)
}
return info, nil
@@ -597,7 +620,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
}
if err != nil {
return nil, errors.Wrap(err, "read metadata failed")
return nil, fmt.Errorf("read metadata failed: %w", err)
}
if result.XMLName.Local == "folder" {
return nil, fs.ErrorIsDir
@@ -720,7 +743,7 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
// Create OAuth Client
oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
return nil, nil, fmt.Errorf("Failed to configure Jottacloud oauth client: %w", err)
}
return oAuthClient, ts, nil
}
@@ -786,7 +809,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
_, err := f.NewObject(context.TODO(), remote)
if err != nil {
if uErr := errors.Cause(err); uErr == fs.ErrorObjectNotFound || uErr == fs.ErrorNotAFile || uErr == fs.ErrorIsDir {
if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorNotAFile) || errors.Is(err, fs.ErrorIsDir) {
// File doesn't exist so return old f
f.root = root
return f, nil
@@ -881,7 +904,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, fs.ErrorDirNotFound
}
}
return nil, errors.Wrap(err, "couldn't list files")
return nil, fmt.Errorf("couldn't list files: %w", err)
}
if !f.validFolder(&result) {
@@ -909,49 +932,121 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return entries, nil
}
// listFileDirFn is called from listFileDir to handle an object.
type listFileDirFn func(fs.DirEntry) error
type listStreamTime time.Time
// List the objects and directories into entries, from a
// special kind of JottaFolder representing a FileDirList
func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
pathPrefixLength := len(pathPrefix)
startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
startPathLength := len(startPath)
for i := range startFolder.Folders {
folder := &startFolder.Folders[i]
if !f.validFolder(folder) {
return nil
func (c *listStreamTime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var v string
if err := d.DecodeElement(&v, &start); err != nil {
return err
}
t, err := time.Parse(time.RFC3339, v)
if err != nil {
return err
}
*c = listStreamTime(t)
return nil
}
func (c listStreamTime) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%s\"", time.Time(c).Format(time.RFC3339))), nil
}
func parseListRStream(ctx context.Context, r io.Reader, trimPrefix string, filesystem *Fs, callback func(fs.DirEntry) error) error {
type stats struct {
Folders int `xml:"folders"`
Files int `xml:"files"`
}
var expected, actual stats
type xmlFile struct {
Path string `xml:"path"`
Name string `xml:"filename"`
Checksum string `xml:"md5"`
Size int64 `xml:"size"`
Modified listStreamTime `xml:"modified"`
Created listStreamTime `xml:"created"`
}
type xmlFolder struct {
Path string `xml:"path"`
}
addFolder := func(path string) error {
return callback(fs.NewDir(filesystem.opt.Enc.ToStandardPath(path), time.Time{}))
}
addFile := func(f *xmlFile) error {
return callback(&Object{
hasMetaData: true,
fs: filesystem,
remote: filesystem.opt.Enc.ToStandardPath(path.Join(f.Path, f.Name)),
size: f.Size,
md5: f.Checksum,
modTime: time.Time(f.Modified),
})
}
trimPathPrefix := func(p string) string {
p = strings.TrimPrefix(p, trimPrefix)
p = strings.TrimPrefix(p, "/")
return p
}
uniqueFolders := map[string]bool{}
decoder := xml.NewDecoder(r)
for {
t, err := decoder.Token()
if err != nil {
if err != io.EOF {
return err
}
break
}
folderPath := f.opt.Enc.ToStandardPath(path.Join(folder.Path, folder.Name))
folderPathLength := len(folderPath)
var remoteDir string
if folderPathLength > pathPrefixLength {
remoteDir = folderPath[pathPrefixLength+1:]
if folderPathLength > startPathLength {
d := fs.NewDir(remoteDir, time.Time(folder.ModifiedAt))
err := fn(d)
if err != nil {
return err
}
}
}
for i := range folder.Files {
file := &folder.Files[i]
if f.validFile(file) {
remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
o, err := f.newObjectWithInfo(ctx, remoteFile, file)
if err != nil {
return err
}
err = fn(o)
if err != nil {
switch se := t.(type) {
case xml.StartElement:
switch se.Name.Local {
case "file":
var f xmlFile
if err := decoder.DecodeElement(&f, &se); err != nil {
return err
}
f.Path = trimPathPrefix(f.Path)
actual.Files++
if !uniqueFolders[f.Path] {
uniqueFolders[f.Path] = true
actual.Folders++
if err := addFolder(f.Path); err != nil {
return err
}
}
if err := addFile(&f); err != nil {
return err
}
case "folder":
var f xmlFolder
if err := decoder.DecodeElement(&f, &se); err != nil {
return err
}
f.Path = trimPathPrefix(f.Path)
uniqueFolders[f.Path] = true
actual.Folders++
if err := addFolder(f.Path); err != nil {
return err
}
case "stats":
if err := decoder.DecodeElement(&expected, &se); err != nil {
return err
}
}
}
}
if expected.Folders != actual.Folders ||
expected.Files != actual.Files {
return fmt.Errorf("Invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual)
}
return nil
}
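
parseListRStream is a token-level streaming parse: rather than decoding the whole listing into one struct as the old mode=list code did, it walks the XML with Decoder.Token, decodes each file or folder element as it arrives, and cross-checks the counts against the trailing stats element. A minimal standalone sketch of the pattern, with made-up element names:

package main

import (
    "encoding/xml"
    "fmt"
    "io"
    "strings"
)

func main() {
    r := strings.NewReader(`<list><file><name>a.txt</name></file><file><name>b.txt</name></file></list>`)
    type file struct {
        Name string `xml:"name"`
    }
    dec := xml.NewDecoder(r)
    for {
        tok, err := dec.Token()
        if err == io.EOF {
            break
        }
        if err != nil {
            panic(err)
        }
        // Only fully decode the elements we care about; everything
        // else streams past without being buffered.
        if se, ok := tok.(xml.StartElement); ok && se.Name.Local == "file" {
            var f file
            if err := dec.DecodeElement(&f, &se); err != nil {
                panic(err)
            }
            fmt.Println(f.Name) // a.txt, then b.txt
        }
    }
}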
@@ -966,12 +1061,27 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
Path: f.filePath(dir),
Parameters: url.Values{},
}
opts.Parameters.Set("mode", "list")
opts.Parameters.Set("mode", "liststream")
list := walk.NewListRHelper(callback)
var resp *http.Response
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.Call(ctx, &opts)
if err != nil {
return shouldRetry(ctx, resp, err)
}
// liststream paths are /mountpoint/root/path,
// so the returned paths should have /mountpoint/root/ trimmed,
// as the caller expects paths relative to the root.
trimPrefix := path.Join("/", f.opt.Mountpoint, f.root)
err = parseListRStream(ctx, resp.Body, trimPrefix, f, func(d fs.DirEntry) error {
if d.Remote() == dir {
return nil
}
return list.Add(d)
})
_ = resp.Body.Close()
return shouldRetry(ctx, resp, err)
})
if err != nil {
@@ -981,12 +1091,8 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return fs.ErrorDirNotFound
}
}
return errors.Wrap(err, "couldn't list files")
return fmt.Errorf("couldn't list files: %w", err)
}
list := walk.NewListRHelper(callback)
err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
return list.Add(entry)
})
if err != nil {
return err
}
@@ -1081,7 +1187,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't purge directory")
return fmt.Errorf("couldn't purge directory: %w", err)
}
return nil
@@ -1148,7 +1254,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
return nil, fmt.Errorf("couldn't copy file: %w", err)
}
return f.newObjectWithInfo(ctx, remote, info)
@@ -1178,7 +1284,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
return nil, fmt.Errorf("couldn't move file: %w", err)
}
return f.newObjectWithInfo(ctx, remote, info)
@@ -1222,7 +1328,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
if err != nil {
return errors.Wrap(err, "couldn't move directory")
return fmt.Errorf("couldn't move directory: %w", err)
}
return nil
}
@@ -1256,13 +1362,13 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
}
if err != nil {
if unlink {
return "", errors.Wrap(err, "couldn't remove public link")
return "", fmt.Errorf("couldn't remove public link: %w", err)
}
return "", errors.Wrap(err, "couldn't create public link")
return "", fmt.Errorf("couldn't create public link: %w", err)
}
if unlink {
if result.PublicURI != "" {
return "", errors.Errorf("couldn't remove public link - %q", result.PublicURI)
return "", fmt.Errorf("couldn't remove public link - %q", result.PublicURI)
}
return "", nil
}
@@ -1322,7 +1428,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
var info api.TrashResponse
_, err := f.apiSrv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return errors.Wrap(err, "couldn't empty trash")
return fmt.Errorf("couldn't empty trash: %w", err)
}
return nil
@@ -1584,7 +1690,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// if the object exists delete it
err = o.remove(ctx, true)
if err != nil {
return errors.Wrap(err, "failed to remove old object")
return fmt.Errorf("failed to remove old object: %w", err)
}
}
// if the object does not exist we can just continue but if the error is something different we should report that
@@ -1605,7 +1711,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
md5String, in, cleanup, err = readMD5(in, size, int64(o.fs.opt.MD5MemoryThreshold))
defer cleanup()
if err != nil {
return errors.Wrap(err, "failed to calculate MD5")
return fmt.Errorf("failed to calculate MD5: %w", err)
}
// Wrap the accounting back onto the stream
in = wrap(in)

View File

@@ -28,33 +28,57 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr",
Description: "Koofr, Digi Storage and other Koofr-compatible storage providers",
NewFs: NewFs,
Options: []fs.Option{{
Name: fs.ConfigProvider,
Help: "Choose your storage provider.",
// NOTE if you add a new provider here, then add it in the
// setProviderDefaults() function and update options accordingly
Examples: []fs.OptionExample{{
Value: "koofr",
Help: "Koofr, https://app.koofr.net/",
}, {
Value: "digistorage",
Help: "Digi Storage, https://storage.rcs-rds.ro/",
}, {
Value: "other",
Help: "Any other Koofr API compatible storage service",
}},
}, {
Name: "endpoint",
Help: "The Koofr API endpoint to use.",
Default: "https://app.koofr.net",
Provider: "other",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name.",
Help: "Your user name.",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
Provider: "koofr",
IsPassword: true,
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password).",
Provider: "digistorage",
IsPassword: true,
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at your service's settings page).",
Provider: "other",
IsPassword: true,
Required: true,
}, {
@@ -71,6 +95,7 @@ func init() {
// Options represent the configuration of the Koofr backend
type Options struct {
Provider string `config:"provider"`
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
@@ -255,13 +280,38 @@ func (f *Fs) fullPath(part string) string {
return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
}
// NewFs constructs a new filesystem given a root path and configuration options
func setProviderDefaults(opt *Options) {
// handle old, provider-less configs
if opt.Provider == "" {
if opt.Endpoint == "" || strings.HasPrefix(opt.Endpoint, "https://app.koofr.net") {
opt.Provider = "koofr"
} else if strings.HasPrefix(opt.Endpoint, "https://storage.rcs-rds.ro") {
opt.Provider = "digistorage"
} else {
opt.Provider = "other"
}
}
// now assign an endpoint
if opt.Provider == "koofr" {
opt.Endpoint = "https://app.koofr.net"
} else if opt.Provider == "digistorage" {
opt.Endpoint = "https://storage.rcs-rds.ro"
}
}
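
A quick usage sketch of the migration logic above, assuming the surrounding package's Options type: old provider-less configs are classified by their endpoint, and known providers then get their endpoint pinned.

// Old config that only had an endpoint: inferred as Digi Storage.
opt := &Options{Endpoint: "https://storage.rcs-rds.ro"}
setProviderDefaults(opt)
// opt.Provider == "digistorage"

// Old config with no endpoint at all: treated as Koofr and given
// the default endpoint.
opt = &Options{}
setProviderDefaults(opt)
// opt.Provider == "koofr", opt.Endpoint == "https://app.koofr.net"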
// NewFs constructs a new filesystem given a root path and rclone configuration options
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
setProviderDefaults(opt)
return NewFsFromOptions(ctx, name, root, opt)
}
// NewFsFromOptions constructs a new filesystem given a root path and internal configuration options
func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff fs.Fs, err error) {
pass, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, err

View File

@@ -5,10 +5,10 @@ package local
import (
"context"
"fmt"
"os"
"syscall"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -20,7 +20,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if os.IsNotExist(err) {
return nil, fs.ErrorDirNotFound
}
return nil, errors.Wrap(err, "failed to read disk usage")
return nil, fmt.Errorf("failed to read disk usage: %w", err)
}
bs := int64(s.Bsize) // nolint: unconvert
usage := &fs.Usage{

View File

@@ -5,10 +5,10 @@ package local
import (
"context"
"fmt"
"syscall"
"unsafe"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -24,7 +24,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
)
if e1 != syscall.Errno(0) {
return nil, errors.Wrap(e1, "failed to read disk usage")
return nil, fmt.Errorf("failed to read disk usage: %w", e1)
}
usage := &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used

View File

@@ -4,6 +4,7 @@ package local
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -16,7 +17,6 @@ import (
"time"
"unicode/utf8"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -432,7 +432,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fd, err := os.Open(fsDirPath)
if err != nil {
isPerm := os.IsPermission(err)
err = errors.Wrapf(err, "failed to open directory %q", dir)
err = fmt.Errorf("failed to open directory %q: %w", dir, err)
fs.Errorf(dir, "%v", err)
if isPerm {
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
@@ -443,7 +443,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
defer func() {
cerr := fd.Close()
if cerr != nil && err == nil {
err = errors.Wrapf(cerr, "failed to close directory %q:", dir)
err = fmt.Errorf("failed to close directory %q:: %w", dir, cerr)
}
}()
@@ -473,7 +473,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
if fierr != nil {
err = errors.Wrapf(err, "failed to read directory %q", namepath)
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
@@ -483,7 +483,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
}
if err != nil {
return nil, errors.Wrap(err, "failed to read directory entry")
return nil, fmt.Errorf("failed to read directory entry: %w", err)
}
for _, fi := range fis {
@@ -496,7 +496,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fi, err = os.Stat(localPath)
if os.IsNotExist(err) || isCircularSymlinkError(err) {
// Skip bad symlinks and circular symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
fs.Errorf(newRemote, "Listing error: %v", err)
err = accounting.Stats(ctx).Error(err)
continue
@@ -672,7 +672,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return err
}
if !fi.Mode().IsDir() {
return errors.Errorf("can't purge non directory: %q", dir)
return fmt.Errorf("can't purge non directory: %q", dir)
}
return os.RemoveAll(dir)
}
@@ -866,12 +866,12 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
err := o.lstat()
var changed bool
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
if errors.Is(err, os.ErrNotExist) {
// If file not found then we assume any accumulated
// hashes are OK - this will error on Open
changed = true
} else {
return "", errors.Wrap(err, "hash: failed to stat")
return "", fmt.Errorf("hash: failed to stat: %w", err)
}
} else {
o.fs.objectMetaMu.RLock()
@@ -900,16 +900,16 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
in = readers.NewLimitedReadCloser(in, o.size)
}
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
return "", fmt.Errorf("hash: failed to open: %w", err)
}
var hashes map[hash.Type]string
hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
closeErr := in.Close()
if err != nil {
return "", errors.Wrap(err, "hash: failed to read")
return "", fmt.Errorf("hash: failed to read: %w", err)
}
if closeErr != nil {
return "", errors.Wrap(closeErr, "hash: failed to close")
return "", fmt.Errorf("hash: failed to close: %w", closeErr)
}
hashValue = hashes[r]
o.fs.objectMetaMu.Lock()
@@ -990,17 +990,17 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
// Check if file has the same size and modTime
fi, err := file.fd.Stat()
if err != nil {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
return 0, fmt.Errorf("can't read status of source file while transferring: %w", err)
}
file.o.fs.objectMetaMu.RLock()
oldtime := file.o.modTime
oldsize := file.o.size
file.o.fs.objectMetaMu.RUnlock()
if oldsize != fi.Size() {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
}
if !oldtime.Equal(fi.ModTime()) {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
}
}
@@ -1133,6 +1133,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// Wipe hashes before update
o.clearHashCache()
var symlinkData bytes.Buffer
// If the object is a regular file, create it.
// If it is a translated link, just read in the contents, and
@@ -1295,6 +1298,13 @@ func (o *Object) setMetadata(info os.FileInfo) {
}
}
// clearHashCache wipes any cached hashes for the object
func (o *Object) clearHashCache() {
o.fs.objectMetaMu.Lock()
o.hashes = nil
o.fs.objectMetaMu.Unlock()
}
// Stat an Object into info
func (o *Object) lstat() error {
info, err := o.fs.lstat(o.path)
@@ -1306,6 +1316,7 @@ func (o *Object) lstat() error {
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
o.clearHashCache()
return remove(o.path)
}
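
The Hash hunk above also replaces os.IsNotExist(errors.Cause(err)) with errors.Is(err, os.ErrNotExist). That is not just cosmetic: os.IsNotExist predates error wrapping and does not unwrap %w chains, while errors.Is walks the whole chain, so it still matches once lstat errors are wrapped with fmt.Errorf. A small sketch of the difference:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	wrapped := fmt.Errorf("hash: failed to stat: %w", fs.ErrNotExist)

	fmt.Println(os.IsNotExist(wrapped))             // false: does not unwrap %w chains
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true: walks the chain (os.ErrNotExist == fs.ErrNotExist)
}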


@@ -1,6 +1,7 @@
package local
import (
"bytes"
"context"
"io/ioutil"
"os"
@@ -12,6 +13,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
@@ -93,16 +95,16 @@ func TestSymlink(t *testing.T) {
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
// Check with no symlink flags
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t)
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
fstest.CheckItems(t, r.Flocal, file1, file2d)
fstest.CheckItems(t, r.Fremote)
r.CheckLocalItems(t, file1, file2d)
r.CheckRemoteItems(t)
// Set fs into "-l" mode
f.opt.FollowSymlinks = false
@@ -111,7 +113,7 @@ func TestSymlink(t *testing.T) {
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2)
r.CheckLocalItems(t, file1, file2)
}
// Create a symlink
@@ -119,7 +121,7 @@ func TestSymlink(t *testing.T) {
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
r.CheckLocalItems(t, file1, file2, file3)
}
// Check it got the correct contents
@@ -166,3 +168,64 @@ func TestSymlinkError(t *testing.T) {
_, err := NewFs(context.Background(), "local", "/", m)
assert.Equal(t, errLinksAndCopyLinks, err)
}
// Test hashes on updating an object
func TestHashOnUpdate(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
f := r.Flocal.(*Fs)
// Get the object
o, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
// Test the hash is as we expect
md5, err := o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
// Reupload it with different contents but same size and timestamp
var b = bytes.NewBufferString("CONTENT")
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
err = o.Update(ctx, b, src)
require.NoError(t, err)
// Check the hash is as expected
md5, err = o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "45685e95985e20822fb2538a522a5ccf", md5)
}
// Test hashes on deleting an object
func TestHashOnDelete(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
f := r.Flocal.(*Fs)
// Get the object
o, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
// Test the hash is as we expect
md5, err := o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
// Delete the object
require.NoError(t, o.Remove(ctx))
// Test the hash cache is empty
require.Nil(t, o.(*Object).hashes)
// Test the hash returns an error
_, err = o.Hash(ctx, hash.MD5)
require.Error(t, err)
}
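
The expected values pinned by these tests are simply the MD5 digests of the literal file contents, which is what makes them a good check that the hash cache was really invalidated when the bytes changed behind the same size and mtime. They can be reproduced directly:

package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	fmt.Printf("%x\n", md5.Sum([]byte("content"))) // 9a0364b9e99bb480dd25e1f0284c8555
	fmt.Printf("%x\n", md5.Sum([]byte("CONTENT"))) // 45685e95985e20822fb2538a522a5ccf
}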


@@ -6,11 +6,11 @@ import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/lib/readers"
)


@@ -58,7 +58,7 @@ type UserInfoResponse struct {
AutoProlong bool `json:"auto_prolong"`
Basequota int64 `json:"basequota"`
Enabled bool `json:"enabled"`
Expires int `json:"expires"`
Expires int64 `json:"expires"`
Prolong bool `json:"prolong"`
Promocodes struct {
} `json:"promocodes"`
@@ -80,7 +80,7 @@ type UserInfoResponse struct {
FileSizeLimit int64 `json:"file_size_limit"`
Space struct {
BytesTotal int64 `json:"bytes_total"`
BytesUsed int `json:"bytes_used"`
BytesUsed int64 `json:"bytes_used"`
Overquota bool `json:"overquota"`
} `json:"space"`
} `json:"cloud"`
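
Widening Expires and BytesUsed from int to int64 matters on 32-bit builds, where Go's int is 32 bits: encoding/json refuses to unmarshal a number that overflows the destination type, so quota values past 2^31-1 would fail to decode. It also lets the About hunk further down drop its now-redundant int64() cast. A minimal sketch with an illustrative 4 TiB value:

package main

import (
	"encoding/json"
	"fmt"
)

type space struct {
	BytesTotal int64 `json:"bytes_total"`
	BytesUsed  int64 `json:"bytes_used"` // was int: decode fails above 2^31-1 where int is 32 bits
}

func main() {
	var s space
	err := json.Unmarshal([]byte(`{"bytes_total":8796093022208,"bytes_used":4398046511104}`), &s)
	fmt.Println(s.BytesUsed, err) // 4398046511104 <nil>
}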


@@ -3,6 +3,7 @@ package mailru
import (
"bytes"
"context"
"errors"
"fmt"
gohash "hash"
"io"
@@ -40,7 +41,6 @@ import (
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"github.com/pkg/errors"
"golang.org/x/oauth2"
)
@@ -269,7 +269,7 @@ func errorHandler(res *http.Response) (err error) {
}
serverError.Message = string(data)
if serverError.Message == "" || strings.HasPrefix(serverError.Message, "{") {
// Replace empty or JSON response with a human readable text.
// Replace empty or JSON response with a human-readable text.
serverError.Message = res.Status
}
serverError.Status = res.StatusCode
@@ -438,7 +438,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
err = errors.New("Invalid token")
}
if err != nil {
return errors.Wrap(err, "Failed to authorize")
return fmt.Errorf("Failed to authorize: %w", err)
}
if err = oauthutil.PutToken(f.name, f.m, t, false); err != nil {
@@ -507,7 +507,7 @@ func (f *Fs) reAuthorize(opts *rest.Opts, origErr error) error {
func (f *Fs) accessToken() (string, error) {
token, err := f.source.Token()
if err != nil {
return "", errors.Wrap(err, "cannot refresh access token")
return "", fmt.Errorf("cannot refresh access token: %w", err)
}
return token.AccessToken, nil
}
@@ -1196,7 +1196,7 @@ func (f *Fs) purgeWithCheck(ctx context.Context, dir string, check bool, opName
_, dirSize, err := f.readItemMetaData(ctx, path)
if err != nil {
return errors.Wrapf(err, "%s failed", opName)
return fmt.Errorf("%s failed: %w", opName, err)
}
if check && dirSize > 0 {
return fs.ErrorDirectoryNotEmpty
@@ -1300,7 +1300,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
})
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
return nil, fmt.Errorf("couldn't copy file: %w", err)
}
if response.Status != 200 {
return nil, fmt.Errorf("copy failed with code %d", response.Status)
@@ -1572,7 +1572,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
total := info.Body.Cloud.Space.BytesTotal
used := int64(info.Body.Cloud.Space.BytesUsed)
used := info.Body.Cloud.Space.BytesUsed
usage := &fs.Usage{
Total: fs.NewUsageValue(total),
@@ -1684,7 +1684,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
spoolFile, mrHash, err := makeTempFile(ctx, tmpFs, wrapIn, src)
if err != nil {
return errors.Wrap(err, "Failed to create spool file")
return fmt.Errorf("Failed to create spool file: %w", err)
}
if o.putByHash(ctx, mrHash, src, "spool") {
// If put by hash is successful, ignore transitive error
@@ -2318,7 +2318,7 @@ func (p *serverPool) Dispatch(ctx context.Context, current string) (string, erro
})
if err != nil || url == "" {
closeBody(res)
return "", errors.Wrap(err, "Failed to request file server")
return "", fmt.Errorf("Failed to request file server: %w", err)
}
p.addServer(url, now)


@@ -17,6 +17,7 @@ Improvements:
import (
"context"
"errors"
"fmt"
"io"
"path"
@@ -24,7 +25,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -165,13 +165,6 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
}
// Let the mega library handle the low level retries
return false, err
/*
switch errors.Cause(err) {
case mega.EAGAIN, mega.ERATELIMIT, mega.ETEMPUNAVAIL:
return true, err
}
return fserrors.ShouldRetry(err), err
*/
}
// readMetaDataForPath reads the metadata from the path
@@ -195,7 +188,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var err error
opt.Pass, err = obscure.Reveal(opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "couldn't decrypt password")
return nil, fmt.Errorf("couldn't decrypt password: %w", err)
}
}
ci := fs.GetConfig(ctx)
@@ -222,7 +215,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
err := srv.Login(opt.User, opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "couldn't login")
return nil, fmt.Errorf("couldn't login: %w", err)
}
megaCache[opt.User] = srv
}
@@ -261,7 +254,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// splitNodePath splits nodePath into / separated parts, returning nil if it
// should refer to the root.
// It also encodes the parts into backend specific encoding
// It also encodes the parts into backend-specific encoding
func (f *Fs) splitNodePath(nodePath string) (parts []string) {
nodePath = path.Clean(nodePath)
if nodePath == "." || nodePath == "/" {
@@ -350,11 +343,11 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
break
}
if err != mega.ENOENT {
return nil, errors.Wrap(err, "mkdir lookup failed")
return nil, fmt.Errorf("mkdir lookup failed: %w", err)
}
}
if err != nil {
return nil, errors.Wrap(err, "internal error: mkdir called with non existent root node")
return nil, fmt.Errorf("internal error: mkdir called with non-existent root node: %w", err)
}
// i is number of directories to create (may be 0)
// node is directory to create them from
@@ -365,7 +358,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "mkdir create node failed")
return nil, fmt.Errorf("mkdir create node failed: %w", err)
}
}
return node, nil
@@ -428,7 +421,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return false
})
if err != nil {
return errors.Wrap(err, "CleanUp failed to list items in trash")
return fmt.Errorf("CleanUp failed to list items in trash: %w", err)
}
fs.Infof(f, "Deleting %d items from the trash", len(items))
errors := 0
@@ -489,7 +482,7 @@ type listFn func(*mega.Node) bool
func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, err error) {
nodes, err := f.srv.FS.GetChildren(dir)
if err != nil {
return false, errors.Wrapf(err, "list failed")
return false, fmt.Errorf("list failed: %w", err)
}
for _, item := range nodes {
if fn(item) {
@@ -609,7 +602,10 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return err
}
_, err = f.mkdir(ctx, rootNode, dir)
return errors.Wrap(err, "Mkdir failed")
if err != nil {
return fmt.Errorf("Mkdir failed: %w", err)
}
return nil
}
// deleteNode removes a file or directory, observing useTrash
@@ -639,7 +635,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if check {
children, err := f.srv.FS.GetChildren(dirNode)
if err != nil {
return errors.Wrap(err, "purgeCheck GetChildren failed")
return fmt.Errorf("purgeCheck GetChildren failed: %w", err)
}
if len(children) > 0 {
return fs.ErrorDirectoryNotEmpty
@@ -650,7 +646,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
err = f.deleteNode(ctx, dirNode)
if err != nil {
return errors.Wrap(err, "delete directory node failed")
return fmt.Errorf("delete directory node failed: %w", err)
}
// Remove the root node if we just deleted it
@@ -704,7 +700,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
dstDirNode, err = dstFs.mkdir(ctx, absRoot, dstParent)
}
if err != nil {
return errors.Wrap(err, "server-side move failed to make dst parent dir")
return fmt.Errorf("server-side move failed to make dst parent dir: %w", err)
}
if srcRemote != "" {
@@ -717,7 +713,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
srcDirNode, err = f.findDir(absRoot, srcParent)
}
if err != nil {
return errors.Wrap(err, "server-side move failed to lookup src parent dir")
return fmt.Errorf("server-side move failed to lookup src parent dir: %w", err)
}
// move the object into its new directory if required
@@ -728,7 +724,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "server-side move failed")
return fmt.Errorf("server-side move failed: %w", err)
}
}
@@ -742,7 +738,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "server-side rename failed")
return fmt.Errorf("server-side rename failed: %w", err)
}
}
@@ -812,7 +808,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if err == nil {
return fs.ErrorDirExists
} else if err != fs.ErrorDirNotFound {
return errors.Wrap(err, "DirMove error while checking dest directory")
return fmt.Errorf("DirMove error while checking dest directory: %w", err)
}
// Do the move
@@ -844,15 +840,15 @@ func (f *Fs) Hashes() hash.Set {
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
root, err := f.findRoot(ctx, false)
if err != nil {
return "", errors.Wrap(err, "PublicLink failed to find root node")
return "", fmt.Errorf("PublicLink failed to find root node: %w", err)
}
node, err := f.findNode(root, remote)
if err != nil {
return "", errors.Wrap(err, "PublicLink failed to find path")
return "", fmt.Errorf("PublicLink failed to find path: %w", err)
}
link, err = f.srv.Link(node, true)
if err != nil {
return "", errors.Wrap(err, "PublicLink failed to create link")
return "", fmt.Errorf("PublicLink failed to create link: %w", err)
}
return link, nil
}
@@ -867,13 +863,13 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
dstDir := dirs[0]
dstDirNode := f.srv.FS.HashLookup(dstDir.ID())
if dstDirNode == nil {
return errors.Errorf("MergeDirs failed to find node for: %v", dstDir)
return fmt.Errorf("MergeDirs failed to find node for: %v", dstDir)
}
for _, srcDir := range dirs[1:] {
// find src directory
srcDirNode := f.srv.FS.HashLookup(srcDir.ID())
if srcDirNode == nil {
return errors.Errorf("MergeDirs failed to find node for: %v", srcDir)
return fmt.Errorf("MergeDirs failed to find node for: %v", srcDir)
}
// list the objects
@@ -883,7 +879,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return false
})
if err != nil {
return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
return fmt.Errorf("MergeDirs list failed on %v: %w", srcDir, err)
}
// move them into place
for _, info := range infos {
@@ -893,14 +889,14 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", f.opt.Enc.ToStandardName(info.GetName()), srcDir)
return fmt.Errorf("MergeDirs move failed on %q in %v: %w", f.opt.Enc.ToStandardName(info.GetName()), srcDir, err)
}
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.deleteNode(ctx, srcDirNode)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
return fmt.Errorf("MergeDirs move failed to rmdir %q: %w", srcDir, err)
}
}
return nil
@@ -915,7 +911,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get Mega Quota")
return nil, fmt.Errorf("failed to get Mega Quota: %w", err)
}
usage := &fs.Usage{
Total: fs.NewUsageValue(int64(q.Mstrg)), // quota of bytes that can be used
@@ -1076,7 +1072,7 @@ func (oo *openObject) Close() (err error) {
return shouldRetry(oo.ctx, err)
})
if err != nil {
return errors.Wrap(err, "failed to finish download")
return fmt.Errorf("failed to finish download: %w", err)
}
oo.closed = true
return nil
@@ -1104,7 +1100,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "open download file failed")
return nil, fmt.Errorf("open download file failed: %w", err)
}
oo := &openObject{
@@ -1133,7 +1129,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Create the parent directory
dirNode, leaf, err := o.fs.mkdirParent(ctx, remote)
if err != nil {
return errors.Wrap(err, "update make parent dir failed")
return fmt.Errorf("update make parent dir failed: %w", err)
}
var u *mega.Upload
@@ -1142,7 +1138,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "upload file failed to create session")
return fmt.Errorf("upload file failed to create session: %w", err)
}
// Upload the chunks
@@ -1150,12 +1146,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
for id := 0; id < u.Chunks(); id++ {
_, chunkSize, err := u.ChunkLocation(id)
if err != nil {
return errors.Wrap(err, "upload failed to read chunk location")
return fmt.Errorf("upload failed to read chunk location: %w", err)
}
chunk := make([]byte, chunkSize)
_, err = io.ReadFull(in, chunk)
if err != nil {
return errors.Wrap(err, "upload failed to read data")
return fmt.Errorf("upload failed to read data: %w", err)
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1163,7 +1159,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "upload file failed to upload chunk")
return fmt.Errorf("upload file failed to upload chunk: %w", err)
}
}
@@ -1174,14 +1170,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "failed to finish upload")
return fmt.Errorf("failed to finish upload: %w", err)
}
// If the upload succeeded and the original object existed, then delete it
if o.info != nil {
err = o.fs.deleteNode(ctx, o.info)
if err != nil {
return errors.Wrap(err, "upload failed to remove old version")
return fmt.Errorf("upload failed to remove old version: %w", err)
}
o.info = nil
}
@@ -1193,7 +1189,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) error {
err := o.fs.deleteNode(ctx, o.info)
if err != nil {
return errors.Wrap(err, "Remove object failed")
return fmt.Errorf("Remove object failed: %w", err)
}
return nil
}
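
One behavioural trap in this conversion shows up in the Mkdir hunk above: pkg/errors returns nil from Wrap(nil, "msg"), so "return errors.Wrap(err, "Mkdir failed")" was safe on success, but fmt.Errorf always returns a non-nil error, which is why the replacement adds an explicit nil check. A minimal demonstration:

package main

import "fmt"

func wrap(err error) error {
	// unconditional conversion: turns a nil error into a non-nil one
	return fmt.Errorf("Mkdir failed: %w", err)
}

func main() {
	var err error // nil: the operation succeeded
	fmt.Println(wrap(err) == nil) // false: success reported as failure

	// the safe pattern, as used in the hunk above
	if err != nil {
		err = fmt.Errorf("Mkdir failed: %w", err)
	}
	fmt.Println(err == nil) // true
}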


@@ -14,7 +14,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -586,7 +585,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
bucket, bucketPath := o.split()
data, err := ioutil.ReadAll(in)
if err != nil {
return errors.Wrap(err, "failed to update memory object")
return fmt.Errorf("failed to update memory object: %w", err)
}
o.od = &objectData{
data: data,

backend/netstorage/netstorage.go (executable file, 1277 lines): diff suppressed because it is too large.


@@ -0,0 +1,16 @@
package netstorage_test
import (
"testing"
"github.com/rclone/rclone/backend/netstorage"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestnStorage:",
NilObject: (*netstorage.Object)(nil),
})
}

backend/onedrive/onedrive.go (106 changed lines): file mode changed, Executable file → Normal file

@@ -7,6 +7,7 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@@ -18,7 +19,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/onedrive/api"
"github.com/rclone/rclone/backend/onedrive/quickxorhash"
"github.com/rclone/rclone/fs"
@@ -65,9 +65,12 @@ var (
authPath = "/common/oauth2/v2.0/authorize"
tokenPath = "/common/oauth2/v2.0/token"
scopesWithSitePermission = []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"}
scopesWithoutSitePermission = []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}
// Description of how to auth for this app for a business account
oauthConfig = &oauth2.Config{
Scopes: []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
Scopes: scopesWithSitePermission,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -137,11 +140,31 @@ Note that the chunks will be buffered into memory.`,
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
Default: "",
Advanced: true,
}, {
Name: "root_folder_id",
Help: `ID of the root folder.
This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get
there through a path traversal.
`,
Advanced: true,
}, {
Name: "disable_site_permission",
Help: `Disable the request for Sites.Read.All permission.
If set to true, you will no longer be able to search for a SharePoint site when
configuring drive ID, because rclone will not request Sites.Read.All permission.
Set it to true if your organization didn't assign Sites.Read.All permission to the
application, and your organization does not allow users to consent to app
permission requests on their own.`,
Default: false,
Advanced: true,
}, {
Name: "expose_onenote_files",
Help: `Set to make OneNote files show up in directory listings.
By default rclone will hide OneNote files in directory listings because
By default, rclone will hide OneNote files in directory listings because
operations like "Open" and "Update" won't work on them. But this
behaviour may also prevent you from deleting them. If you want to
delete OneNote files or otherwise want them to show up in directory
@@ -374,6 +397,12 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
region, graphURL := getRegionURL(m)
if config.State == "" {
disableSitePermission, _ := m.Get("disable_site_permission")
if disableSitePermission == "true" {
oauthConfig.Scopes = scopesWithoutSitePermission
} else {
oauthConfig.Scopes = scopesWithSitePermission
}
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[region] + authPath,
TokenURL: authEndpoint[region] + tokenPath,
@@ -385,7 +414,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure OneDrive")
return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
}
srv := rest.NewClient(oAuthClient)
@@ -527,6 +556,8 @@ type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
RootFolderID string `config:"root_folder_id"`
DisableSitePermission bool `config:"disable_site_permission"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ListChunk int64 `config:"list_chunk"`
@@ -618,6 +649,12 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
retry := false
if resp != nil {
switch resp.StatusCode {
case 400:
if apiErr, ok := err.(*api.Error); ok {
if apiErr.ErrorInfo.InnerError.Code == "pathIsTooLong" {
return false, fserrors.NoRetryError(err)
}
}
case 401:
if len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
retry = true
@@ -754,10 +791,10 @@ func errorHandler(resp *http.Response) error {
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
if cs%chunkSizeMultiple != 0 {
return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
return fmt.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
}
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -781,7 +818,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "onedrive: chunk size")
return nil, fmt.Errorf("onedrive: chunk size: %w", err)
}
if opt.DriveID == "" || opt.DriveType == "" {
@@ -789,6 +826,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
if opt.DisableSitePermission {
oauthConfig.Scopes = scopesWithoutSitePermission
} else {
oauthConfig.Scopes = scopesWithSitePermission
}
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[opt.Region] + authPath,
TokenURL: authEndpoint[opt.Region] + tokenPath,
@@ -797,7 +839,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure OneDrive")
return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
}
ci := fs.GetConfig(ctx)
@@ -826,12 +868,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
})
// Get rootID
rootInfo, _, err := f.readMetaDataForPath(ctx, "")
if err != nil || rootInfo.GetID() == "" {
return nil, errors.Wrap(err, "failed to get root")
var rootID = opt.RootFolderID
if rootID == "" {
rootInfo, _, err := f.readMetaDataForPath(ctx, "")
if err != nil {
return nil, fmt.Errorf("failed to get root: %w", err)
}
rootID = rootInfo.GetID()
}
if rootID == "" {
return nil, errors.New("failed to get root: ID was empty")
}
f.dirCache = dircache.New(root, rootInfo.GetID(), f)
f.dirCache = dircache.New(root, rootID, f)
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
@@ -839,7 +888,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
@@ -971,7 +1020,7 @@ OUTER:
return shouldRetry(ctx, resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
return found, fmt.Errorf("couldn't list files: %w", err)
}
if len(result.Value) == 0 {
break
@@ -1175,7 +1224,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
var status api.AsyncOperationStatus
err = json.Unmarshal(body, &status)
if err != nil {
return errors.Wrapf(err, "async status result not JSON: %q", body)
return fmt.Errorf("async status result not JSON: %q: %w", body, err)
}
switch status.Status {
@@ -1185,15 +1234,18 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
}
fallthrough
case "deleteFailed":
return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
return fmt.Errorf("%s: async operation returned %q", o.remote, status.Status)
case "completed":
err = o.readMetaData(ctx)
return errors.Wrapf(err, "async operation completed but readMetaData failed")
if err != nil {
return fmt.Errorf("async operation completed but readMetaData failed: %w", err)
}
return nil
}
time.Sleep(1 * time.Second)
}
return errors.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
return fmt.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
}
// Copy src to this remote using server-side copy operations.
@@ -1232,7 +1284,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
return nil, fmt.Errorf("can't copy %q -> %q as they have the same name when lowercased", srcPath, dstPath)
}
}
@@ -1450,7 +1502,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
return nil, fmt.Errorf("about failed: %w", err)
}
q := drive.Quota
// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
@@ -1501,7 +1553,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
})
if err != nil {
if resp != nil && resp.StatusCode == 400 && f.driveType != driveTypePersonal {
return "", errors.Errorf("%v (is making public links permitted by the org admin?)", err)
return "", fmt.Errorf("%v (is making public links permitted by the org admin?)", err)
}
return "", err
}
@@ -1886,17 +1938,17 @@ func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err er
return 0, err
}
if len(info.NextExpectedRanges) != 1 {
return 0, errors.Errorf("bad number of ranges in upload position: %v", info.NextExpectedRanges)
return 0, fmt.Errorf("bad number of ranges in upload position: %v", info.NextExpectedRanges)
}
position := info.NextExpectedRanges[0]
i := strings.IndexByte(position, '-')
if i < 0 {
return 0, errors.Errorf("no '-' in next expected range: %q", position)
return 0, fmt.Errorf("no '-' in next expected range: %q", position)
}
position = position[:i]
pos, err = strconv.ParseInt(position, 10, 64)
if err != nil {
return 0, errors.Wrapf(err, "bad expected range: %q", position)
return 0, fmt.Errorf("bad expected range: %q: %w", position, err)
}
return pos, nil
}
@@ -1930,14 +1982,14 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
fs.Debugf(o, "Read position %d, chunk is %d..%d, bytes to skip = %d", pos, start, start+chunkSize, skip)
switch {
case skip < 0:
return false, errors.Wrapf(err, "sent block already (skip %d < 0), can't rewind", skip)
return false, fmt.Errorf("sent block already (skip %d < 0), can't rewind: %w", skip, err)
case skip > chunkSize:
return false, errors.Wrapf(err, "position is in the future (skip %d > chunkSize %d), can't skip forward", skip, chunkSize)
return false, fmt.Errorf("position is in the future (skip %d > chunkSize %d), can't skip forward: %w", skip, chunkSize, err)
case skip == chunkSize:
fs.Debugf(o, "Skipping chunk as already sent (skip %d == chunkSize %d)", skip, chunkSize)
return false, nil
}
return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
return true, fmt.Errorf("retry this chunk skipping %d bytes: %w", skip, err)
}
if err != nil {
return shouldRetry(ctx, resp, err)


@@ -136,7 +136,8 @@ func (q *quickXorHash) Write(p []byte) (n int, err error) {
func (q *quickXorHash) checkSum() (h [Size]byte) {
// Output the data as little endian bytes
ph := 0
for _, d := range q.data[:len(q.data)-1] {
for i := 0; i < len(q.data)-1; i++ {
d := q.data[i]
_ = h[ph+7] // bounds check
h[ph+0] = byte(d >> (8 * 0))
h[ph+1] = byte(d >> (8 * 1))
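
The checkSum change swaps a range loop for an index loop over the same elements and keeps the _ = h[ph+7] hint so the compiler can prove the eight stores that follow are in bounds and drop the per-byte checks. The unrolled stores are the classic little-endian serialization; for comparison, the stdlib equivalent is binary.LittleEndian.PutUint64 (a sketch, not the backend's code):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var h [16]byte
	data := []uint64{0x0123456789abcdef, 0xfedcba9876543210}
	ph := 0
	for i := 0; i < len(data); i++ {
		binary.LittleEndian.PutUint64(h[ph:], data[i]) // same as the unrolled byte(d >> (8 * k)) stores
		ph += 8
	}
	fmt.Printf("% x\n", h)
}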


@@ -2,6 +2,7 @@ package opendrive
import (
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -11,7 +12,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -210,7 +210,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to create session")
return nil, fmt.Errorf("failed to create session: %w", err)
}
fs.Debugf(nil, "Starting OpenDrive session with ID: %s", f.session.SessionID)
@@ -362,7 +362,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("Can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
return nil, fmt.Errorf("Can't copy %q -> %q as they have the same name when lowercased", srcPath, dstPath)
}
// Create temporary object
@@ -636,7 +636,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to create file")
return nil, fmt.Errorf("failed to create file: %w", err)
}
o.id = response.FileID
@@ -719,7 +719,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return "", false, errors.Wrap(err, "failed to get folder list")
return "", false, fmt.Errorf("failed to get folder list: %w", err)
}
leaf = f.opt.Enc.FromStandardName(leaf)
@@ -762,7 +762,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get folder list")
return nil, fmt.Errorf("failed to get folder list: %w", err)
}
for _, folder := range folderList.Folders {
@@ -871,7 +871,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to open file)")
return nil, fmt.Errorf("failed to open file: %w", err)
}
return resp.Body, nil
@@ -919,7 +919,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create file")
return fmt.Errorf("failed to create file: %w", err)
}
// resp.Body.Close()
// fs.Debugf(nil, "PostOpen: %#v", openResponse)
@@ -963,10 +963,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create file")
return fmt.Errorf("failed to create file: %w", err)
}
if reply.TotalWritten != currentChunkSize {
return errors.Errorf("failed to create file: incomplete write of %d/%d bytes", reply.TotalWritten, currentChunkSize)
return fmt.Errorf("failed to create file: incomplete write of %d/%d bytes", reply.TotalWritten, currentChunkSize)
}
chunkCounter++
@@ -986,7 +986,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create file")
return fmt.Errorf("failed to create file: %w", err)
}
// fs.Debugf(nil, "PostClose: %#v", closeResponse)
@@ -1038,7 +1038,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to get folder list")
return fmt.Errorf("failed to get folder list: %w", err)
}
if len(folderList.Files) == 0 {


@@ -2,14 +2,13 @@
// object storage system.
package pcloud
// FIXME implement ListR? /listfolder can do recursive lists
// FIXME cleanup returns login required?
// FIXME mime type? Fix overview if implement.
import (
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -18,7 +17,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/pcloud/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -27,6 +25,7 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
@@ -246,7 +245,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
found, err := f.listAll(ctx, directoryID, false, true, false, func(item *api.Item) bool {
if item.Name == leaf {
info = item
return true
@@ -290,7 +289,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Pcloud")
return nil, fmt.Errorf("failed to configure Pcloud: %w", err)
}
updateTokenURL(oauthConfig, opt.Hostname)
@@ -380,7 +379,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
found, err = f.listAll(ctx, pathID, true, false, false, func(item *api.Item) bool {
if item.Name == leaf {
pathIDOut = item.ID
return true
@@ -446,14 +445,16 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, recursive bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/listfolder",
Parameters: url.Values{},
}
if recursive {
opts.Parameters.Set("recursive", "1")
}
opts.Parameters.Set("folderid", dirIDtoNumber(dirID))
// FIXME can do recursive
var result api.ItemResult
var resp *http.Response
@@ -463,28 +464,73 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
return shouldRetry(ctx, resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
return found, fmt.Errorf("couldn't list files: %w", err)
}
for i := range result.Metadata.Contents {
item := &result.Metadata.Contents[i]
if item.IsFolder {
if filesOnly {
continue
var recursiveContents func(is []api.Item, path string)
recursiveContents = func(is []api.Item, path string) {
for i := range is {
item := &is[i]
if item.IsFolder {
if filesOnly {
continue
}
} else {
if directoriesOnly {
continue
}
}
} else {
if directoriesOnly {
continue
item.Name = path + f.opt.Enc.ToStandardName(item.Name)
if fn(item) {
found = true
break
}
if recursive {
recursiveContents(item.Contents, item.Name+"/")
}
}
item.Name = f.opt.Enc.ToStandardName(item.Name)
if fn(item) {
found = true
break
}
}
recursiveContents(result.Metadata.Contents, "")
return
}
// listHelper iterates over all items from the directory
// and calls the callback for each element.
func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callback func(entries fs.DirEntry) error) (err error) {
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, recursive, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.IsFolder {
// cache the directory ID for later lookups
f.dirCache.Put(remote, info.ID)
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
// FIXME more info from dir?
iErr = callback(d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
}
iErr = callback(o)
}
if iErr != nil {
return true
}
return false
})
if err != nil {
return err
}
if iErr != nil {
return iErr
}
return nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
@@ -495,36 +541,24 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.IsFolder {
// cache the directory ID for later lookups
f.dirCache.Put(remote, info.ID)
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
// FIXME more info from dir?
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
}
entries = append(entries, o)
}
return false
err = f.listHelper(ctx, dir, false, func(o fs.DirEntry) error {
entries = append(entries, o)
return nil
})
return entries, err
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := walk.NewListRHelper(callback)
err = f.listHelper(ctx, dir, true, func(o fs.DirEntry) error {
return list.Add(o)
})
if err != nil {
return nil, err
return err
}
if iErr != nil {
return nil, iErr
}
return entries, nil
return list.Flush()
}
// Creates from the parameters passed in a half finished Object which
@@ -600,7 +634,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
return fmt.Errorf("rmdir failed: %w", err)
}
f.dirCache.FlushDir(dir)
if err != nil {
@@ -656,7 +690,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("mtime", fmt.Sprintf("%d", srcObj.modTime.Unix()))
opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(srcObj.modTime.Unix())))
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
@@ -872,7 +906,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
return nil, fmt.Errorf("about failed: %w", err)
}
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Quota), // quota of bytes that can be used
@@ -952,7 +986,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if o.md5 == "" && o.sha1 == "" && o.sha256 == "" {
err := o.getHashes(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to get hash")
return "", fmt.Errorf("failed to get hash: %w", err)
}
}
return *pHash, nil
@@ -971,7 +1005,7 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.IsFolder {
return errors.Wrapf(fs.ErrorNotAFile, "%q is a folder", o.remote)
return fmt.Errorf("%q is a folder: %w", o.remote, fs.ErrorNotAFile)
}
o.hasMetaData = true
o.size = info.Size
@@ -1058,7 +1092,7 @@ func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
return "", err
}
if !result.IsValid() {
return "", errors.Errorf("fetched invalid link %+v", result)
return "", fmt.Errorf("fetched invalid link %+v", result)
}
o.link = &result
return o.link.URL(), nil
@@ -1137,7 +1171,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
opts.Parameters.Set("filename", leaf)
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("nopartial", "1")
opts.Parameters.Set("mtime", fmt.Sprintf("%d", modTime.Unix()))
opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(modTime.Unix())))
// Special treatment for a 0 length upload. This doesn't work
// with PUT even with Content-Length set (by setting
@@ -1146,7 +1180,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if size == 0 {
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
if err != nil {
return errors.Wrap(err, "failed to make multipart upload for 0 length file")
return fmt.Errorf("failed to make multipart upload for 0 length file: %w", err)
}
contentLength := overhead + size
@@ -1177,7 +1211,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
if len(result.Items) != 1 {
return errors.Errorf("failed to upload %v - not sure why", o)
return fmt.Errorf("failed to upload %v - not sure why", o)
}
o.setHashes(&result.Checksums[0])
return o.setMetaData(&result.Items[0])
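
With listAll able to pass recursive=1 to /listfolder, List and ListR now share listHelper, and ListR streams results through walk.NewListRHelper, which batches directory entries into the callback. A hedged usage sketch (it assumes rclone's config is already loaded and that a pcloud: remote exists; the remote path is illustrative):

package main

import (
	"context"
	"fmt"
	"log"

	_ "github.com/rclone/rclone/backend/pcloud" // register the backend
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configfile"
)

func main() {
	configfile.Install() // load the default rclone config
	ctx := context.Background()
	f, err := fs.NewFs(ctx, "pcloud:some/dir") // illustrative remote path
	if err != nil {
		log.Fatal(err)
	}
	// ListR is an optional feature; it is non-nil here because the diff above implements it
	err = f.Features().ListR(ctx, "", func(entries fs.DirEntries) error {
		for _, e := range entries {
			fmt.Println(e.Remote())
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}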


@@ -18,6 +18,7 @@ canStream = false
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net"
@@ -27,7 +28,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/premiumizeme/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -250,7 +250,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.APIKey == "" {
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure premiumize.me")
return nil, fmt.Errorf("failed to configure premiumize.me: %w", err)
}
} else {
client = fshttp.NewClient(ctx)
@@ -380,10 +380,10 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
return "", errors.Wrap(err, "CreateDir http")
return "", fmt.Errorf("CreateDir http: %w", err)
}
if err = info.AsErr(); err != nil {
return "", errors.Wrap(err, "CreateDir")
return "", fmt.Errorf("CreateDir: %w", err)
}
// fmt.Printf("...Id %q\n", *info.Id)
return info.ID, nil
@@ -420,10 +420,10 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
return shouldRetry(ctx, resp, err)
})
if err != nil {
return newDirID, found, errors.Wrap(err, "couldn't list files")
return newDirID, found, fmt.Errorf("couldn't list files: %w", err)
}
if err = result.AsErr(); err != nil {
return newDirID, found, errors.Wrap(err, "error while listing")
return newDirID, found, fmt.Errorf("error while listing: %w", err)
}
newDirID = result.FolderID
for i := range result.Content {
@@ -572,7 +572,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return true
})
if err != nil {
return errors.Wrap(err, "purgeCheck")
return fmt.Errorf("purgeCheck: %w", err)
}
if found {
return fs.ErrorDirectoryNotEmpty
@@ -594,10 +594,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
return fmt.Errorf("rmdir failed: %w", err)
}
if err = result.AsErr(); err != nil {
return errors.Wrap(err, "rmdir")
return fmt.Errorf("rmdir: %w", err)
}
f.dirCache.FlushDir(dir)
if err != nil {
@@ -645,7 +645,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
tmpLeaf := newLeaf + "." + random.String(8)
err = f.renameLeaf(ctx, isFile, id, tmpLeaf)
if err != nil {
return errors.Wrap(err, "Move rename leaf")
return fmt.Errorf("Move rename leaf: %w", err)
}
}
@@ -674,10 +674,10 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "Move http")
return fmt.Errorf("Move http: %w", err)
}
if err = result.AsErr(); err != nil {
return errors.Wrap(err, "Move")
return fmt.Errorf("Move: %w", err)
}
}
@@ -685,7 +685,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
if doRenameLeaf {
err = f.renameLeaf(ctx, isFile, id, newLeaf)
if err != nil {
return errors.Wrap(err, "Move rename leaf")
return fmt.Errorf("Move rename leaf: %w", err)
}
}
@@ -783,10 +783,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "CreateDir http")
return nil, fmt.Errorf("CreateDir http: %w", err)
}
if err = info.AsErr(); err != nil {
return nil, errors.Wrap(err, "CreateDir")
return nil, fmt.Errorf("CreateDir: %w", err)
}
usage = &fs.Usage{
Used: fs.NewUsageValue(int64(info.SpaceUsed)),
@@ -843,7 +843,7 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type != "file" {
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
}
o.hasMetaData = true
o.size = info.Size
@@ -953,19 +953,19 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var u *url.URL
u, err = url.Parse(info.URL)
if err != nil {
return true, errors.Wrap(err, "failed to parse download URL")
return true, fmt.Errorf("failed to parse download URL: %w", err)
}
_, err = net.LookupIP(u.Hostname())
if err != nil {
return true, errors.Wrap(err, "failed to resolve download URL")
return true, fmt.Errorf("failed to resolve download URL: %w", err)
}
return false, nil
})
if err != nil {
return errors.Wrap(err, "upload get URL http")
return fmt.Errorf("upload get URL http: %w", err)
}
if err = info.AsErr(); err != nil {
return errors.Wrap(err, "upload get URL")
return fmt.Errorf("upload get URL: %w", err)
}
// if file exists then rename it out the way otherwise uploads can fail
@@ -976,7 +976,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "Moving old file out the way to %q", newLeaf)
err = o.fs.renameLeaf(ctx, true, oldID, newLeaf)
if err != nil {
return errors.Wrap(err, "upload rename old file")
return fmt.Errorf("upload rename old file: %w", err)
}
defer func() {
// on failed upload rename old file back
@@ -984,7 +984,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "Renaming old file back (from %q to %q) since upload failed", leaf, newLeaf)
newErr := o.fs.renameLeaf(ctx, true, oldID, leaf)
if newErr != nil && err == nil {
err = errors.Wrap(newErr, "upload renaming old file back")
err = fmt.Errorf("upload renaming old file back: %w", newErr)
}
}
}()
@@ -1007,10 +1007,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "upload file http")
return fmt.Errorf("upload file http: %w", err)
}
if err = result.AsErr(); err != nil {
return errors.Wrap(err, "upload file")
return fmt.Errorf("upload file: %w", err)
}
// on successful upload, remove old file if it exists
@@ -1019,7 +1019,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "Removing old file")
err := o.fs.remove(ctx, oldID)
if err != nil {
return errors.Wrap(err, "upload remove old file")
return fmt.Errorf("upload remove old file: %w", err)
}
}
@@ -1049,10 +1049,10 @@ func (f *Fs) renameLeaf(ctx context.Context, isFile bool, id string, newLeaf str
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "rename http")
return fmt.Errorf("rename http: %w", err)
}
if err = result.AsErr(); err != nil {
return errors.Wrap(err, "rename")
return fmt.Errorf("rename: %w", err)
}
return nil
}
@@ -1074,10 +1074,10 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "remove http")
return fmt.Errorf("remove http: %w", err)
}
if err = result.AsErr(); err != nil {
return errors.Wrap(err, "remove")
return fmt.Errorf("remove: %w", err)
}
return nil
}
@@ -1086,7 +1086,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
func (o *Object) Remove(ctx context.Context) error {
err := o.readMetaData(ctx)
if err != nil {
return errors.Wrap(err, "Remove: Failed to read metadata")
return fmt.Errorf("Remove: Failed to read metadata: %w", err)
}
return o.fs.remove(ctx, o.id)
}


@@ -4,16 +4,21 @@ import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/pacer"
)
func checkStatusCode(resp *http.Response, expected int) error {
if resp.StatusCode != expected {
return &statusCodeError{response: resp}
func checkStatusCode(resp *http.Response, expected ...int) error {
for _, code := range expected {
if resp.StatusCode == code {
return nil
}
}
return nil
return &statusCodeError{response: resp}
}
type statusCodeError struct {
@@ -24,8 +29,10 @@ func (e *statusCodeError) Error() string {
return fmt.Sprintf("unexpected status code (%d) response while doing %s to %s", e.response.StatusCode, e.response.Request.Method, e.response.Request.URL.String())
}
// This method is called from fserrors.ShouldRetry() to determine if an error should be retried.
// Some errors (e.g. 429 Too Many Requests) are handled before this step, so they are not included here.
func (e *statusCodeError) Temporary() bool {
return e.response.StatusCode == 429 || e.response.StatusCode >= 500
return e.response.StatusCode >= 500
}
// shouldRetry returns a boolean as to whether this err deserves to be
@@ -40,6 +47,16 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
if perr, ok := err.(*putio.ErrorResponse); ok {
err = &statusCodeError{response: perr.Response}
}
if scerr, ok := err.(*statusCodeError); ok && scerr.response.StatusCode == 429 {
delay := defaultRateLimitSleep
header := scerr.response.Header.Get("x-ratelimit-reset")
if header != "" {
if resetTime, cerr := strconv.ParseInt(header, 10, 64); cerr == nil {
delay = time.Until(time.Unix(resetTime+1, 0))
}
}
return true, pacer.RetryAfterError(scerr, delay)
}
if fserrors.ShouldRetry(err) {
return true, err
}
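
The effect of the change above is that a 429 is no longer retried on the generic backoff: shouldRetry converts it into pacer.RetryAfterError carrying the delay parsed from x-ratelimit-reset (or defaultRateLimitSleep when the header is missing), and the pacer.Call sites used throughout these backends honour that delay before the next attempt. An illustrative sketch of that contract using lib/pacer directly (values invented):

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/pacer"
)

func main() {
	p := pacer.New()
	calls := 0
	err := p.Call(func() (bool, error) {
		calls++
		if calls == 1 {
			// simulate a 429: ask for a retry, but not before the server-supplied reset
			return true, pacer.RetryAfterError(errors.New("rate limited"), 100*time.Millisecond)
		}
		return false, nil // success on the second attempt
	})
	fmt.Println(calls, err) // 2 <nil>
}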


@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
@@ -13,7 +14,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
@@ -80,7 +80,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (f fs.Fs,
httpClient := fshttp.NewClient(ctx)
oAuthClient, _, err := oauthutil.NewClientWithBaseClient(ctx, name, m, putioConfig, httpClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure putio")
return nil, fmt.Errorf("failed to configure putio: %w", err)
}
p := &Fs{
name: name,
@@ -302,8 +302,8 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
if err != nil {
return false, err
}
if resp.StatusCode != 201 {
return false, fmt.Errorf("unexpected status code from upload create: %d", resp.StatusCode)
if err := checkStatusCode(resp, 201); err != nil {
return shouldRetry(ctx, err)
}
location = resp.Header.Get("location")
if location == "" {
@@ -469,7 +469,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
// check directory exists
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return errors.Wrap(err, "Rmdir")
return fmt.Errorf("Rmdir: %w", err)
}
dirID := atoi(directoryID)
@@ -482,7 +482,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
return fmt.Errorf("Rmdir: %w", err)
}
if len(children) != 0 {
return errors.New("directory not empty")
@@ -647,7 +647,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
return nil, fmt.Errorf("about failed: %w", err)
}
return &fs.Usage{
Total: fs.NewUsageValue(ai.Disk.Size), // quota of bytes that can be used

View File

@@ -2,6 +2,7 @@ package putio
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
@@ -9,7 +10,6 @@ import (
"strconv"
"time"
"github.com/pkg/errors"
"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
@@ -82,7 +82,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
}
err := o.readEntryAndSetMetadata(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to read hash from metadata")
return "", fmt.Errorf("failed to read hash from metadata: %w", err)
}
return o.file.CRC32, nil
}
@@ -241,7 +241,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
// fs.Debugf(o, "opening file: id=%d", o.file.ID)
resp, err = o.fs.httpClient.Do(req)
return shouldRetry(ctx, err)
if err != nil {
return shouldRetry(ctx, err)
}
if err := checkStatusCode(resp, 200, 206); err != nil {
return shouldRetry(ctx, err)
}
return false, nil
})
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {
_ = resp.Body.Close()

View File

@@ -33,8 +33,9 @@ const (
rcloneObscuredClientSecret = "cMwrjWVmrHZp3gf1ZpCrlyGAmPpB-YY5BbVnO1fj-G9evcd8"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
decayConstant = 1 // bigger for slower decay, exponential
defaultChunkSize = 48 * fs.Mebi
defaultRateLimitSleep = 60 * time.Second
)
var (

View File

@@ -8,6 +8,7 @@ package qingstor
import (
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -17,7 +18,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -285,7 +285,7 @@ func qsServiceConnection(ctx context.Context, opt *Options) (*qs.Service, error)
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -300,7 +300,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
@@ -329,11 +329,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "qingstor: chunk size")
return nil, fmt.Errorf("qingstor: chunk size: %w", err)
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
return nil, fmt.Errorf("qingstor: upload cutoff: %w", err)
}
svc, err := qsServiceConnection(ctx, opt)
if err != nil {
@@ -884,7 +884,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
var resp *qs.ListMultipartUploadsOutput
resp, err = bucketInit.ListMultipartUploads(&req)
if err != nil {
return errors.Wrap(err, "clean up bucket list multipart uploads")
return fmt.Errorf("clean up bucket list multipart uploads: %w", err)
}
for _, upload := range resp.Uploads {
if upload.Created != nil && upload.Key != nil && upload.UploadID != nil {
@@ -896,7 +896,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
}
_, abortErr := bucketInit.AbortMultipartUpload(*upload.Key, &req)
if abortErr != nil {
err = errors.Wrapf(abortErr, "failed to remove multipart upload for %q", *upload.Key)
err = fmt.Errorf("failed to remove multipart upload for %q: %w", *upload.Key, abortErr)
fs.Errorf(f, "%v", err)
}
} else {

View File

@@ -8,13 +8,13 @@ package qingstor
import (
"bytes"
"crypto/md5"
"errors"
"fmt"
"hash"
"io"
"sort"
"sync"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"
qs "github.com/yunify/qingstor-sdk-go/v3/service"
@@ -175,7 +175,7 @@ func (u *uploader) upload() error {
u.init()
if u.cfg.partSize < minMultiPartSize {
return errors.Errorf("part size must be at least %d bytes", minMultiPartSize)
return fmt.Errorf("part size must be at least %d bytes", minMultiPartSize)
}
// Do one read to determine if we have more than one part
@@ -184,7 +184,7 @@ func (u *uploader) upload() error {
fs.Debugf(u, "Uploading as single part object to QingStor")
return u.singlePartUpload(reader, u.readerPos)
} else if err != nil {
return errors.Errorf("read upload data failed: %s", err)
return fmt.Errorf("read upload data failed: %s", err)
}
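// Aside (not part of the commit): unlike the %w conversions elsewhere in
// this set, %s flattens the underlying error to text here, so it can no
// longer be matched later with errors.Is or errors.As.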
fs.Debugf(u, "Uploading as multi-part object to QingStor")

File diff suppressed because it is too large

View File

@@ -2,6 +2,7 @@ package seafile
import (
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -13,7 +14,6 @@ import (
"time"
"github.com/coreos/go-semver/semver"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/seafile/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -171,14 +171,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var err error
opt.Password, err = obscure.Reveal(opt.Password)
if err != nil {
return nil, errors.Wrap(err, "couldn't decrypt user password")
return nil, fmt.Errorf("couldn't decrypt user password: %w", err)
}
}
if opt.LibraryKey != "" {
var err error
opt.LibraryKey, err = obscure.Reveal(opt.LibraryKey)
if err != nil {
return nil, errors.Wrap(err, "couldn't decrypt library password")
return nil, fmt.Errorf("couldn't decrypt library password: %w", err)
}
}
@@ -282,7 +282,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
_, err := f.NewObject(ctx, remote)
if err != nil {
if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return the original f
f.rootDirectory = rootDirectory
return f, nil
@@ -305,7 +305,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
u, err := url.Parse(serverURL)
if err != nil {
return nil, errors.Errorf("invalid server URL %s", serverURL)
return nil, fmt.Errorf("invalid server URL %s", serverURL)
}
is2faEnabled, _ := m.Get(config2FA)
@@ -886,7 +886,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// 1- rename source
err = srcFs.renameDir(ctx, srcLibraryID, srcPath, tempName)
if err != nil {
return errors.Wrap(err, "Cannot rename source directory to a temporary name")
return fmt.Errorf("Cannot rename source directory to a temporary name: %w", err)
}
// 2- move source to destination
@@ -900,7 +900,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// 3- rename destination back to source name
err = f.renameDir(ctx, dstLibraryID, path.Join(dstDir, tempName), dstName)
if err != nil {
return errors.Wrap(err, "Cannot rename temporary directory to destination name")
return fmt.Errorf("Cannot rename temporary directory to destination name: %w", err)
}
return nil

View File

@@ -3,6 +3,7 @@ package seafile
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -11,7 +12,6 @@ import (
"path"
"strings"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/seafile/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/readers"
@@ -61,7 +61,7 @@ func getAuthorizationToken(ctx context.Context, srv *rest.Client, user, password
_, err := srv.CallJSON(ctx, &opts, &request, &result)
if err != nil {
// This is only going to be http errors here
return "", errors.Wrap(err, "failed to authenticate")
return "", fmt.Errorf("failed to authenticate: %w", err)
}
if result.Errors != nil && len(result.Errors) > 0 {
return "", errors.New(strings.Join(result.Errors, ", "))
@@ -94,7 +94,7 @@ func (f *Fs) getServerInfo(ctx context.Context) (account *api.ServerInfo, err er
return nil, fs.ErrorPermissionDenied
}
}
return nil, errors.Wrap(err, "failed to get server info")
return nil, fmt.Errorf("failed to get server info: %w", err)
}
return &result, nil
}
@@ -120,7 +120,7 @@ func (f *Fs) getUserAccountInfo(ctx context.Context) (account *api.AccountInfo,
return nil, fs.ErrorPermissionDenied
}
}
return nil, errors.Wrap(err, "failed to get account info")
return nil, fmt.Errorf("failed to get account info: %w", err)
}
return &result, nil
}
@@ -147,7 +147,7 @@ func (f *Fs) getLibraries(ctx context.Context) ([]api.Library, error) {
return nil, fs.ErrorPermissionDenied
}
}
return nil, errors.Wrap(err, "failed to get libraries")
return nil, fmt.Errorf("failed to get libraries: %w", err)
}
return result, nil
}
@@ -178,7 +178,7 @@ func (f *Fs) createLibrary(ctx context.Context, libraryName, password string) (l
return nil, fs.ErrorPermissionDenied
}
}
return nil, errors.Wrap(err, "failed to create library")
return nil, fmt.Errorf("failed to create library: %w", err)
}
return result, nil
}
@@ -205,7 +205,7 @@ func (f *Fs) deleteLibrary(ctx context.Context, libraryID string) error {
return fs.ErrorPermissionDenied
}
}
return errors.Wrap(err, "failed to delete library")
return fmt.Errorf("failed to delete library: %w", err)
}
return nil
}
@@ -240,7 +240,7 @@ func (f *Fs) decryptLibrary(ctx context.Context, libraryID, password string) err
return nil
}
}
return errors.Wrap(err, "failed to decrypt library")
return fmt.Errorf("failed to decrypt library: %w", err)
}
return nil
}
@@ -286,7 +286,7 @@ func (f *Fs) getDirectoryEntriesAPIv21(ctx context.Context, libraryID, dirPath s
return nil, fs.ErrorPermissionDenied
}
}
return nil, errors.Wrap(err, "failed to get directory contents")
return nil, fmt.Errorf("failed to get directory contents: %w", err)
}
// Clean up encoded names
@@ -327,7 +327,7 @@ func (f *Fs) getDirectoryDetails(ctx context.Context, libraryID, dirPath string)
return nil, fs.ErrorDirNotFound
}
}
return nil, errors.Wrap(err, "failed to get directory details")
return nil, fmt.Errorf("failed to get directory details: %w", err)
}
result.Name = f.opt.Enc.ToStandardName(result.Name)
result.Path = f.opt.Enc.ToStandardPath(result.Path)
@@ -366,7 +366,7 @@ func (f *Fs) createDir(ctx context.Context, libraryID, dirPath string) error {
return fs.ErrorPermissionDenied
}
}
return errors.Wrap(err, "failed to create directory")
return fmt.Errorf("failed to create directory: %w", err)
}
return nil
}
@@ -406,7 +406,7 @@ func (f *Fs) renameDir(ctx context.Context, libraryID, dirPath, newName string)
return fs.ErrorPermissionDenied
}
}
return errors.Wrap(err, "failed to rename directory")
return fmt.Errorf("failed to rename directory: %w", err)
}
return nil
}
@@ -449,7 +449,7 @@ func (f *Fs) moveDir(ctx context.Context, srcLibraryID, srcDir, srcName, dstLibr
return fs.ErrorObjectNotFound
}
}
return errors.Wrap(err, fmt.Sprintf("failed to move directory '%s' from '%s' to '%s'", srcName, srcDir, dstPath))
return fmt.Errorf("failed to move directory '%s' from '%s' to '%s': %w", srcName, srcDir, dstPath, err)
}
return nil
@@ -482,7 +482,7 @@ func (f *Fs) deleteDir(ctx context.Context, libraryID, filePath string) error {
return fs.ErrorPermissionDenied
}
}
return errors.Wrap(err, "failed to delete directory")
return fmt.Errorf("failed to delete directory: %w", err)
}
return nil
}
@@ -516,7 +516,7 @@ func (f *Fs) getFileDetails(ctx context.Context, libraryID, filePath string) (*a
return nil, fs.ErrorPermissionDenied
}
}
return nil, errors.Wrap(err, "failed to get file details")
return nil, fmt.Errorf("failed to get file details: %w", err)
}
result.Name = f.opt.Enc.ToStandardName(result.Name)
result.Parent = f.opt.Enc.ToStandardPath(result.Parent)
@@ -542,7 +542,7 @@ func (f *Fs) deleteFile(ctx context.Context, libraryID, filePath string) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to delete file")
return fmt.Errorf("failed to delete file: %w", err)
}
return nil
}
@@ -573,7 +573,7 @@ func (f *Fs) getDownloadLink(ctx context.Context, libraryID, filePath string) (s
return "", fs.ErrorObjectNotFound
}
}
return "", errors.Wrap(err, "failed to get download link")
return "", fmt.Errorf("failed to get download link: %w", err)
}
return result, nil
}
@@ -667,7 +667,7 @@ func (f *Fs) getUploadLink(ctx context.Context, libraryID string) (string, error
return "", fs.ErrorPermissionDenied
}
}
return "", errors.Wrap(err, "failed to get upload link")
return "", fmt.Errorf("failed to get upload link: %w", err)
}
return result, nil
}
@@ -684,7 +684,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
}
formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
if err != nil {
return nil, errors.Wrap(err, "failed to make multipart upload")
return nil, fmt.Errorf("failed to make multipart upload: %w", err)
}
opts := rest.Opts{
@@ -711,7 +711,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
return nil, ErrorInternalDuringUpload
}
}
return nil, errors.Wrap(err, "failed to upload file")
return nil, fmt.Errorf("failed to upload file: %w", err)
}
if len(result) > 0 {
result[0].Parent = f.opt.Enc.ToStandardPath(result[0].Parent)
@@ -750,7 +750,7 @@ func (f *Fs) listShareLinks(ctx context.Context, libraryID, remote string) ([]ap
return nil, fs.ErrorObjectNotFound
}
}
return nil, errors.Wrap(err, "failed to list shared links")
return nil, fmt.Errorf("failed to list shared links: %w", err)
}
return result, nil
}
@@ -788,7 +788,7 @@ func (f *Fs) createShareLink(ctx context.Context, libraryID, remote string) (*ap
return nil, fs.ErrorObjectNotFound
}
}
return nil, errors.Wrap(err, "failed to create a shared link")
return nil, fmt.Errorf("failed to create a shared link: %w", err)
}
return result, nil
}
@@ -830,7 +830,7 @@ func (f *Fs) copyFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID,
return nil, fs.ErrorObjectNotFound
}
}
return nil, errors.Wrap(err, fmt.Sprintf("failed to copy file %s:'%s' to %s:'%s'", srcLibraryID, srcPath, dstLibraryID, dstPath))
return nil, fmt.Errorf("failed to copy file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err)
}
return f.decodeFileInfo(result), nil
}
@@ -872,7 +872,7 @@ func (f *Fs) moveFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID,
return nil, fs.ErrorObjectNotFound
}
}
return nil, errors.Wrap(err, fmt.Sprintf("failed to move file %s:'%s' to %s:'%s'", srcLibraryID, srcPath, dstLibraryID, dstPath))
return nil, fmt.Errorf("failed to move file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err)
}
return f.decodeFileInfo(result), nil
}
@@ -912,7 +912,7 @@ func (f *Fs) renameFile(ctx context.Context, libraryID, filePath, newname string
return nil, fs.ErrorObjectNotFound
}
}
return nil, errors.Wrap(err, fmt.Sprintf("failed to rename file '%s' to '%s'", filePath, newname))
return nil, fmt.Errorf("failed to rename file '%s' to '%s': %w", filePath, newname, err)
}
return f.decodeFileInfo(result), nil
}
@@ -949,7 +949,7 @@ func (f *Fs) emptyLibraryTrash(ctx context.Context, libraryID string) error {
return fs.ErrorObjectNotFound
}
}
return errors.Wrap(err, "failed empty the library trash")
return fmt.Errorf("failed empty the library trash: %w", err)
}
return nil
}
@@ -991,7 +991,7 @@ func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath st
return nil, fs.ErrorPermissionDenied
}
}
return nil, errors.Wrap(err, "failed to get directory contents")
return nil, fmt.Errorf("failed to get directory contents: %w", err)
}
// Clean up encoded names
@@ -1038,7 +1038,7 @@ func (f *Fs) copyFileAPIv2(ctx context.Context, srcLibraryID, srcPath, dstLibrar
return nil, fs.ErrorPermissionDenied
}
}
return nil, errors.Wrap(err, fmt.Sprintf("failed to copy file %s:'%s' to %s:'%s'", srcLibraryID, srcPath, dstLibraryID, dstPath))
return nil, fmt.Errorf("failed to copy file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err)
}
err = rest.DecodeJSON(resp, &result)
if err != nil {
@@ -1090,7 +1090,7 @@ func (f *Fs) renameFileAPIv2(ctx context.Context, libraryID, filePath, newname s
return fs.ErrorObjectNotFound
}
}
return errors.Wrap(err, "failed to rename file")
return fmt.Errorf("failed to rename file: %w", err)
}
return nil
}

View File

@@ -8,6 +8,7 @@ package sftp
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -20,7 +21,6 @@ import (
"sync/atomic"
"time"
"github.com/pkg/errors"
"github.com/pkg/sftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -42,7 +42,8 @@ const (
hashCommandNotSupported = "none"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
decayConstant = 2 // bigger for slower decay, exponential
keepAliveInterval = time.Minute // send keepalives every this long while running commands
)
var (
@@ -59,11 +60,13 @@ func init() {
Help: "SSH host to connect to.\n\nE.g. \"example.com\".",
Required: true,
}, {
Name: "user",
Help: "SSH username, leave blank for current username, " + currentUser + ".",
Name: "user",
Help: "SSH username.",
Default: currentUser,
}, {
Name: "port",
Help: "SSH port, leave blank to use default (22).",
Name: "port",
Help: "SSH port number.",
Default: 22,
}, {
Name: "pass",
Help: "SSH password, leave blank to use ssh-agent.",
@@ -152,11 +155,11 @@ different. This issue affects among others Synology NAS boxes.
Shared folders can be found in directories representing volumes
rclone sync /home/local/directory remote:/directory --ssh-path-override /volume2/directory
rclone sync /home/local/directory remote:/directory --sftp-path-override /volume2/directory
Home directory can be found in a shared folder called "home"
rclone sync /home/local/directory remote:/home/directory --ssh-path-override /volume1/homes/USER/directory`,
rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory`,
Advanced: true,
}, {
Name: "set_modtime",
@@ -339,6 +342,32 @@ func (c *conn) wait() {
c.err <- c.sshClient.Conn.Wait()
}
// Send a keepalive over the ssh connection
func (c *conn) sendKeepAlive() {
_, _, err := c.sshClient.SendRequest("keepalive@openssh.com", true, nil)
if err != nil {
fs.Debugf(nil, "Failed to send keep alive: %v", err)
}
}
// Send keepalives every interval over the ssh connection until done is closed
func (c *conn) sendKeepAlives(interval time.Duration) (done chan struct{}) {
done = make(chan struct{})
go func() {
t := time.NewTicker(interval)
defer t.Stop()
for {
select {
case <-t.C:
c.sendKeepAlive()
case <-done:
return
}
}
}()
return done
}
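// Illustrative usage (mirroring run() later in this diff): start the
// keepalive goroutine for the lifetime of a long-running remote command
// and stop it by closing the returned channel:
//
//	done := c.sendKeepAlives(keepAliveInterval)
//	defer close(done)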
// Closes the connection
func (c *conn) close() error {
sftpErr := c.sftpClient.Close()
@@ -384,12 +413,12 @@ func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
}
c.sshClient, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
if err != nil {
return nil, errors.Wrap(err, "couldn't connect SSH")
return nil, fmt.Errorf("couldn't connect SSH: %w", err)
}
c.sftpClient, err = f.newSftpClient(c.sshClient)
if err != nil {
_ = c.sshClient.Close()
return nil, errors.Wrap(err, "couldn't initialise SFTP")
return nil, fmt.Errorf("couldn't initialise SFTP: %w", err)
}
go c.wait()
return c, nil
@@ -468,16 +497,16 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
*pc = nil
if err != nil {
// work out if this is an expected error
underlyingErr := errors.Cause(err)
isRegularError := false
switch underlyingErr {
case os.ErrNotExist:
var statusErr *sftp.StatusError
var pathErr *os.PathError
switch {
case errors.Is(err, os.ErrNotExist):
isRegularError = true
case errors.As(err, &statusErr):
isRegularError = true
case errors.As(err, &pathErr):
isRegularError = true
default:
switch underlyingErr.(type) {
case *sftp.StatusError, *os.PathError:
isRegularError = true
}
}
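// Illustrative (not part of the commit): errors.As walks the %w-wrapped
// chain looking for a typed error, which is what replaces the old
// errors.Cause type switch here:
//
//	var statusErr *sftp.StatusError
//	if errors.As(err, &statusErr) {
//		// a typed SFTP status error was found somewhere in the chain
//	}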
// If not a regular SFTP error code then check the connection
if !isRegularError {
@@ -561,7 +590,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.KnownHostsFile != "" {
hostcallback, err := knownhosts.New(env.ShellExpand(opt.KnownHostsFile))
if err != nil {
return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
return nil, fmt.Errorf("couldn't parse known_hosts_file: %w", err)
}
sshConfig.HostKeyCallback = hostcallback
}
@@ -579,20 +608,20 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
sshAgentClient, _, err := sshagent.New()
if err != nil {
return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
return nil, fmt.Errorf("couldn't connect to ssh-agent: %w", err)
}
signers, err := sshAgentClient.Signers()
if err != nil {
return nil, errors.Wrap(err, "couldn't read ssh agent signers")
return nil, fmt.Errorf("couldn't read ssh agent signers: %w", err)
}
if keyFile != "" {
pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
if err != nil {
return nil, errors.Wrap(err, "failed to read public key file")
return nil, fmt.Errorf("failed to read public key file: %w", err)
}
pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
if err != nil {
return nil, errors.Wrap(err, "failed to parse public key file")
return nil, fmt.Errorf("failed to parse public key file: %w", err)
}
pubM := pub.Marshal()
found := false
@@ -617,13 +646,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.KeyPem == "" {
key, err = ioutil.ReadFile(keyFile)
if err != nil {
return nil, errors.Wrap(err, "failed to read private key file")
return nil, fmt.Errorf("failed to read private key file: %w", err)
}
} else {
// wrap in quotes because the config is coming as a literal without them.
opt.KeyPem, err = strconv.Unquote("\"" + opt.KeyPem + "\"")
if err != nil {
return nil, errors.Wrap(err, "pem key not formatted properly")
return nil, fmt.Errorf("pem key not formatted properly: %w", err)
}
key = []byte(opt.KeyPem)
}
@@ -641,19 +670,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(clearpass))
}
if err != nil {
return nil, errors.Wrap(err, "failed to parse private key file")
return nil, fmt.Errorf("failed to parse private key file: %w", err)
}
// If a public key has been specified then use that
if pubkeyFile != "" {
certfile, err := ioutil.ReadFile(pubkeyFile)
if err != nil {
return nil, errors.Wrap(err, "unable to read cert file")
return nil, fmt.Errorf("unable to read cert file: %w", err)
}
pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
if err != nil {
return nil, errors.Wrap(err, "unable to parse cert file")
return nil, fmt.Errorf("unable to parse cert file: %w", err)
}
// And the signer for this, which includes the private key signer
@@ -669,7 +698,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
pubsigner, err := ssh.NewCertSigner(cert, signer)
if err != nil {
return nil, errors.Wrap(err, "error generating cert signer")
return nil, fmt.Errorf("error generating cert signer: %w", err)
}
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(pubsigner))
} else {
@@ -759,7 +788,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
// Make a connection and pool it to return errors early
c, err := f.getSftpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "NewFs")
return nil, fmt.Errorf("NewFs: %w", err)
}
cwd, err := c.sftpClient.Getwd()
f.putSftpConnection(&c, nil)
@@ -840,7 +869,7 @@ func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
}
c, err := f.getSftpConnection(ctx)
if err != nil {
return false, errors.Wrap(err, "dirExists")
return false, fmt.Errorf("dirExists: %w", err)
}
info, err := c.sftpClient.Stat(dir)
f.putSftpConnection(&c, err)
@@ -848,7 +877,7 @@ func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
if os.IsNotExist(err) {
return false, nil
}
return false, errors.Wrap(err, "dirExists stat failed")
return false, fmt.Errorf("dirExists stat failed: %w", err)
}
if !info.IsDir() {
return false, fs.ErrorIsFile
@@ -869,7 +898,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
root := path.Join(f.absRoot, dir)
ok, err := f.dirExists(ctx, root)
if err != nil {
return nil, errors.Wrap(err, "List failed")
return nil, fmt.Errorf("List failed: %w", err)
}
if !ok {
return nil, fs.ErrorDirNotFound
@@ -880,12 +909,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
c, err := f.getSftpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "List")
return nil, fmt.Errorf("List: %w", err)
}
infos, err := c.sftpClient.ReadDir(sftpDir)
f.putSftpConnection(&c, err)
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
return nil, fmt.Errorf("error listing %q: %w", dir, err)
}
for _, info := range infos {
remote := path.Join(dir, info.Name())
@@ -924,7 +953,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
err := f.mkParentDir(ctx, src.Remote())
if err != nil {
return nil, errors.Wrap(err, "Put mkParentDir failed")
return nil, fmt.Errorf("Put mkParentDir failed: %w", err)
}
// Temporary object under construction
o := &Object{
@@ -959,7 +988,7 @@ func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
}
ok, err := f.dirExists(ctx, dirPath)
if err != nil {
return errors.Wrap(err, "mkdir dirExists failed")
return fmt.Errorf("mkdir dirExists failed: %w", err)
}
if ok {
return nil
@@ -971,12 +1000,12 @@ func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
}
c, err := f.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "mkdir")
return fmt.Errorf("mkdir: %w", err)
}
err = c.sftpClient.Mkdir(dirPath)
f.putSftpConnection(&c, err)
if err != nil {
return errors.Wrapf(err, "mkdir %q failed", dirPath)
return fmt.Errorf("mkdir %q failed: %w", dirPath, err)
}
return nil
}
@@ -993,7 +1022,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// delete recursively with RemoveDirectory
entries, err := f.List(ctx, dir)
if err != nil {
return errors.Wrap(err, "Rmdir")
return fmt.Errorf("Rmdir: %w", err)
}
if len(entries) != 0 {
return fs.ErrorDirectoryNotEmpty
@@ -1002,7 +1031,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
root := path.Join(f.absRoot, dir)
c, err := f.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Rmdir")
return fmt.Errorf("Rmdir: %w", err)
}
err = c.sftpClient.RemoveDirectory(root)
f.putSftpConnection(&c, err)
@@ -1018,11 +1047,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err := f.mkParentDir(ctx, remote)
if err != nil {
return nil, errors.Wrap(err, "Move mkParentDir failed")
return nil, fmt.Errorf("Move mkParentDir failed: %w", err)
}
c, err := f.getSftpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "Move")
return nil, fmt.Errorf("Move: %w", err)
}
err = c.sftpClient.Rename(
srcObj.path(),
@@ -1030,11 +1059,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
)
f.putSftpConnection(&c, err)
if err != nil {
return nil, errors.Wrap(err, "Move Rename failed")
return nil, fmt.Errorf("Move Rename failed: %w", err)
}
dstObj, err := f.NewObject(ctx, remote)
if err != nil {
return nil, errors.Wrap(err, "Move NewObject failed")
return nil, fmt.Errorf("Move NewObject failed: %w", err)
}
return dstObj, nil
}
@@ -1059,7 +1088,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Check if destination exists
ok, err := f.dirExists(ctx, dstPath)
if err != nil {
return errors.Wrap(err, "DirMove dirExists dst failed")
return fmt.Errorf("DirMove dirExists dst failed: %w", err)
}
if ok {
return fs.ErrorDirExists
@@ -1068,13 +1097,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Make sure the parent directory exists
err = f.mkdir(ctx, path.Dir(dstPath))
if err != nil {
return errors.Wrap(err, "DirMove mkParentDir dst failed")
return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
}
// Do the move
c, err := f.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "DirMove")
return fmt.Errorf("DirMove: %w", err)
}
err = c.sftpClient.Rename(
srcPath,
@@ -1082,7 +1111,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
)
f.putSftpConnection(&c, err)
if err != nil {
return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
return fmt.Errorf("DirMove Rename(%q,%q) failed: %w", srcPath, dstPath, err)
}
return nil
}
@@ -1094,13 +1123,16 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
c, err := f.getSftpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "run: get SFTP connection")
return nil, fmt.Errorf("run: get SFTP connection: %w", err)
}
defer f.putSftpConnection(&c, err)
// Send keepalives while the connection is open
defer close(c.sendKeepAlives(keepAliveInterval))
session, err := c.sshClient.NewSession()
if err != nil {
return nil, errors.Wrap(err, "run: get SFTP session")
return nil, fmt.Errorf("run: get SFTP session: %w", err)
}
defer func() {
_ = session.Close()
@@ -1110,10 +1142,12 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
session.Stdout = &stdout
session.Stderr = &stderr
fs.Debugf(f, "Running remote command: %s", cmd)
err = session.Run(cmd)
if err != nil {
return nil, errors.Wrapf(err, "failed to run %q: %s", cmd, stderr.Bytes())
return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, bytes.TrimSpace(stderr.Bytes()), err)
}
fs.Debugf(f, "Remote command result: %s", bytes.TrimSpace(stdout.Bytes()))
return stdout.Bytes(), nil
}
@@ -1155,8 +1189,8 @@ func (f *Fs) Hashes() hash.Set {
}
changed := false
md5Works := checkHash([]string{"md5sum", "md5 -r"}, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
sha1Works := checkHash([]string{"sha1sum", "sha1 -r"}, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)
md5Works := checkHash([]string{"md5sum", "md5 -r", "rclone md5sum"}, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
sha1Works := checkHash([]string{"sha1sum", "sha1 -r", "rclone sha1sum"}, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)
if changed {
f.m.Set("md5sum_command", f.opt.Md5sumCommand)
@@ -1186,7 +1220,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
stdout, err := f.run(ctx, "df -k "+escapedPath)
if err != nil {
return nil, errors.Wrap(err, "your remote may not support About")
return nil, fmt.Errorf("your remote may not support About: %w", err)
}
usageTotal, usageUsed, usageAvail := parseUsage(stdout)
@@ -1230,8 +1264,6 @@ func (o *Object) Remote() string {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
o.fs.addSession() // Show session in use
defer o.fs.removeSession()
if o.fs.opt.DisableHashCheck {
return "", nil
}
@@ -1255,36 +1287,16 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return "", errors.Wrap(err, "Hash get SFTP connection")
}
session, err := c.sshClient.NewSession()
o.fs.putSftpConnection(&c, err)
if err != nil {
return "", errors.Wrap(err, "Hash put SFTP connection")
}
var stdout, stderr bytes.Buffer
session.Stdout = &stdout
session.Stderr = &stderr
escapedPath := shellEscape(o.path())
if o.fs.opt.PathOverride != "" {
escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
}
err = session.Run(hashCmd + " " + escapedPath)
fs.Debugf(nil, "sftp cmd = %s", escapedPath)
b, err := o.fs.run(ctx, hashCmd+" "+escapedPath)
if err != nil {
_ = session.Close()
fs.Debugf(o, "Failed to calculate %v hash: %v (%s)", r, err, bytes.TrimSpace(stderr.Bytes()))
return "", nil
return "", fmt.Errorf("failed to calculate %v hash: %w", r, err)
}
_ = session.Close()
b := stdout.Bytes()
fs.Debugf(nil, "sftp output = %q", b)
str := parseHash(b)
fs.Debugf(nil, "sftp hash = %q", str)
if r == hash.MD5 {
o.md5sum = &str
} else if r == hash.SHA1 {
@@ -1366,7 +1378,7 @@ func (o *Object) setMetadata(info os.FileInfo) {
func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err error) {
c, err := f.getSftpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "stat")
return nil, fmt.Errorf("stat: %w", err)
}
absPath := path.Join(f.absRoot, remote)
info, err = c.sftpClient.Stat(absPath)
@@ -1381,7 +1393,7 @@ func (o *Object) stat(ctx context.Context) error {
if os.IsNotExist(err) {
return fs.ErrorObjectNotFound
}
return errors.Wrap(err, "stat failed")
return fmt.Errorf("stat failed: %w", err)
}
if info.IsDir() {
return fs.ErrorIsDir
@@ -1399,16 +1411,16 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
}
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "SetModTime")
return fmt.Errorf("SetModTime: %w", err)
}
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
o.fs.putSftpConnection(&c, err)
if err != nil {
return errors.Wrap(err, "SetModTime failed")
return fmt.Errorf("SetModTime failed: %w", err)
}
err = o.stat(ctx)
if err != nil {
return errors.Wrap(err, "SetModTime stat failed")
return fmt.Errorf("SetModTime stat failed: %w", err)
}
return nil
}
@@ -1487,17 +1499,17 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "Open")
return nil, fmt.Errorf("Open: %w", err)
}
sftpFile, err := c.sftpClient.Open(o.path())
o.fs.putSftpConnection(&c, err)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
return nil, fmt.Errorf("Open failed: %w", err)
}
if offset > 0 {
off, err := sftpFile.Seek(offset, io.SeekStart)
if err != nil || off != offset {
return nil, errors.Wrap(err, "Open Seek failed")
return nil, fmt.Errorf("Open Seek failed: %w", err)
}
}
in = readers.NewLimitedReadCloser(o.fs.newObjectReader(sftpFile), limit)
@@ -1526,12 +1538,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.sha1sum = nil
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Update")
return fmt.Errorf("Update: %w", err)
}
file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
o.fs.putSftpConnection(&c, err)
if err != nil {
return errors.Wrap(err, "Update Create failed")
return fmt.Errorf("Update Create failed: %w", err)
}
// remove the file if upload failed
remove := func() {
@@ -1551,18 +1563,18 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
_, err = file.ReadFrom(&sizeReader{Reader: in, size: src.Size()})
if err != nil {
remove()
return errors.Wrap(err, "Update ReadFrom failed")
return fmt.Errorf("Update ReadFrom failed: %w", err)
}
err = file.Close()
if err != nil {
remove()
return errors.Wrap(err, "Update Close failed")
return fmt.Errorf("Update Close failed: %w", err)
}
// Set the mod time - this stats the object if o.fs.opt.SetModTime == true
err = o.SetModTime(ctx, src.ModTime(ctx))
if err != nil {
return errors.Wrap(err, "Update SetModTime failed")
return fmt.Errorf("Update SetModTime failed: %w", err)
}
// Stat the file after the upload to read its stats back if o.fs.opt.SetModTime == false
@@ -1576,7 +1588,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.size = src.Size()
o.mode = os.FileMode(0666) // regular file
} else if err != nil {
return errors.Wrap(err, "Update stat failed")
return fmt.Errorf("Update stat failed: %w", err)
}
}
@@ -1587,7 +1599,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) error {
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Remove")
return fmt.Errorf("Remove: %w", err)
}
err = c.sftpClient.Remove(o.path())
o.fs.putSftpConnection(&c, err)

View File

@@ -2,10 +2,9 @@
package api
import (
"errors"
"fmt"
"time"
"github.com/pkg/errors"
)
// ListRequestSelect should be used in $select for Items/Children
@@ -122,7 +121,7 @@ type UploadFinishResponse struct {
// ID returns the ID of the first response if available
func (finish *UploadFinishResponse) ID() (string, error) {
if finish.Error {
return "", errors.Errorf("upload failed: %s (%d)", finish.ErrorMessage, finish.ErrorCode)
return "", fmt.Errorf("upload failed: %s (%d)", finish.ErrorMessage, finish.ErrorCode)
}
if len(finish.Value) == 0 {
return "", errors.New("upload failed: no results returned")

View File

@@ -74,6 +74,7 @@ Which is control chars + [' ', '*', '.', '/', ':', '<', '>', '?', '|']
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -83,7 +84,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/sharefile/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -144,7 +144,7 @@ func init() {
subdomain := auth.Form.Get("subdomain")
apicp := auth.Form.Get("apicp")
if subdomain == "" || apicp == "" {
return errors.Errorf("subdomain or apicp not found in response: %+v", auth.Form)
return fmt.Errorf("subdomain or apicp not found in response: %+v", auth.Form)
}
endpoint := "https://" + subdomain + "." + apicp
m.Set("endpoint", endpoint)
@@ -334,7 +334,7 @@ func (f *Fs) readMetaDataForIDPath(ctx context.Context, id, path string, directo
}
return nil, fs.ErrorDirNotFound
}
return nil, errors.Wrap(err, "couldn't find item")
return nil, fmt.Errorf("couldn't find item: %w", err)
}
if directoriesOnly && item.Type != api.ItemTypeFolder {
return nil, fs.ErrorIsFile
@@ -386,10 +386,10 @@ func errorHandler(resp *http.Response) error {
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("ChunkSize: %s is less than %s", cs, minChunkSize)
return fmt.Errorf("ChunkSize: %s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return errors.Errorf("ChunkSize: %s is greater than %s", cs, maxChunkSize)
return fmt.Errorf("ChunkSize: %s is greater than %s", cs, maxChunkSize)
}
return nil
}
@@ -444,7 +444,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var ts *oauthutil.TokenSource
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure sharefile")
return nil, fmt.Errorf("failed to configure sharefile: %w", err)
}
ci := fs.GetConfig(ctx)
@@ -477,23 +477,23 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
const serverTimezone = "America/New_York"
timezone, err := tzdata.Open(serverTimezone)
if err != nil {
return nil, errors.Wrap(err, "failed to open timezone db")
return nil, fmt.Errorf("failed to open timezone db: %w", err)
}
tzdata, err := ioutil.ReadAll(timezone)
if err != nil {
return nil, errors.Wrap(err, "failed to read timezone")
return nil, fmt.Errorf("failed to read timezone: %w", err)
}
_ = timezone.Close()
f.location, err = time.LoadLocationFromTZData(serverTimezone, tzdata)
if err != nil {
return nil, errors.Wrap(err, "failed to load location from timezone")
return nil, fmt.Errorf("failed to load location from timezone: %w", err)
}
// Find ID of user's root folder
if opt.RootFolderID == "" {
item, err := f.readMetaDataForID(ctx, opt.RootFolderID, true, false)
if err != nil {
return nil, errors.Wrap(err, "couldn't find root ID")
return nil, fmt.Errorf("couldn't find root ID: %w", err)
}
f.rootID = item.ID
} else {
@@ -639,7 +639,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", errors.Wrap(err, "CreateDir")
return "", fmt.Errorf("CreateDir: %w", err)
}
return info.ID, nil
}
@@ -671,7 +671,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
return shouldRetry(ctx, resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
return found, fmt.Errorf("couldn't list files: %w", err)
}
for i := range result.Value {
item := &result.Value[i]
@@ -825,7 +825,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return true
})
if err != nil {
return errors.Wrap(err, "purgeCheck")
return fmt.Errorf("purgeCheck: %w", err)
}
if found {
return fs.ErrorDirectoryNotEmpty
@@ -900,7 +900,7 @@ func (f *Fs) updateItem(ctx context.Context, id, leaf, directoryID string, modTi
// Parse it back into a time
newModTime, err := time.Parse(time.RFC3339Nano, isoTime)
if err != nil {
return nil, errors.Wrap(err, "updateItem: time parse")
return nil, fmt.Errorf("updateItem: time parse: %w", err)
}
modTime = &newModTime
}
@@ -934,7 +934,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
// To demonstrate bug
// item, err = f.updateItem(ctx, id, newLeaf, newDirectoryID, nil)
// if err != nil {
// return nil, errors.Wrap(err, "Move rename leaf")
// return nil, fmt.Errorf("Move rename leaf: %w", err)
// }
// return item, nil
doRenameLeaf := oldLeaf != newLeaf
@@ -947,7 +947,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
tmpLeaf := newLeaf + "." + random.String(8)
item, err = f.updateItem(ctx, id, tmpLeaf, "", nil)
if err != nil {
return nil, errors.Wrap(err, "Move rename leaf")
return nil, fmt.Errorf("Move rename leaf: %w", err)
}
}
@@ -956,7 +956,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
if doMove {
item, err = f.updateItem(ctx, id, "", newDirectoryID, nil)
if err != nil {
return nil, errors.Wrap(err, "Move directory")
return nil, fmt.Errorf("Move directory: %w", err)
}
}
@@ -964,7 +964,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
if doRenameLeaf {
item, err = f.updateItem(ctx, id, newLeaf, "", nil)
if err != nil {
return nil, errors.Wrap(err, "Move rename leaf")
return nil, fmt.Errorf("Move rename leaf: %w", err)
}
}
@@ -1079,7 +1079,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
sameName := strings.ToLower(srcLeaf) == strings.ToLower(dstLeaf)
if sameName && srcParentID == dstParentID {
return nil, errors.Errorf("copy: can't copy to a file in the same directory whose name only differs in case: %q vs %q", srcLeaf, dstLeaf)
return nil, fmt.Errorf("copy: can't copy to a file in the same directory whose name only differs in case: %q vs %q", srcLeaf, dstLeaf)
}
// Discover whether we can just copy directly or not
@@ -1095,7 +1095,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
if err == fs.ErrorObjectNotFound || err == fs.ErrorDirNotFound {
directCopy = true
} else if err != nil {
return nil, errors.Wrap(err, "copy: failed to examine destination dir")
return nil, fmt.Errorf("copy: failed to examine destination dir: %w", err)
} else {
// otherwise need to copy via a temporary directory
}
@@ -1109,17 +1109,17 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
tmpDir := "rclone-temp-dir-" + random.String(16)
err = f.Mkdir(ctx, tmpDir)
if err != nil {
return nil, errors.Wrap(err, "copy: failed to make temp dir")
return nil, fmt.Errorf("copy: failed to make temp dir: %w", err)
}
defer func() {
rmdirErr := f.Rmdir(ctx, tmpDir)
if rmdirErr != nil && err == nil {
err = errors.Wrap(rmdirErr, "copy: failed to remove temp dir")
err = fmt.Errorf("copy: failed to remove temp dir: %w", rmdirErr)
}
}()
tmpDirID, err := f.dirCache.FindDir(ctx, tmpDir, false)
if err != nil {
return nil, errors.Wrap(err, "copy: failed to find temp dir")
return nil, fmt.Errorf("copy: failed to find temp dir: %w", err)
}
copyTargetDirID = tmpDirID
}
@@ -1221,7 +1221,7 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type != api.ItemTypeFile {
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
}
o.hasMetaData = true
o.size = info.Size
@@ -1302,7 +1302,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "open: fetch download specification")
return nil, fmt.Errorf("open: fetch download specification: %w", err)
}
fs.FixRangeOption(options, o.size)
@@ -1317,7 +1317,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "open")
return nil, fmt.Errorf("open: %w", err)
}
return resp.Body, err
}
@@ -1373,7 +1373,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "upload get specification")
return fmt.Errorf("upload get specification: %w", err)
}
// If file is large then upload in parts
@@ -1398,7 +1398,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "upload file")
return fmt.Errorf("upload file: %w", err)
}
return o.checkUploadResponse(ctx, &finish)
}
@@ -1434,7 +1434,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "remove")
return fmt.Errorf("remove: %w", err)
}
return nil
}
@@ -1443,7 +1443,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
func (o *Object) Remove(ctx context.Context) error {
err := o.readMetaData(ctx)
if err != nil {
return errors.Wrap(err, "Remove: Failed to read metadata")
return fmt.Errorf("Remove: Failed to read metadata: %w", err)
}
return o.fs.remove(ctx, o.id)
}

View File

@@ -15,7 +15,6 @@ import (
"strings"
"sync"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/sharefile/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -55,7 +54,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
case "threaded":
streamed = false
default:
return nil, errors.Errorf("can't use method %q with newLargeUpload", info.Method)
return nil, fmt.Errorf("can't use method %q with newLargeUpload", info.Method)
}
threads := f.ci.Transfers
@@ -87,7 +86,7 @@ func (up *largeUpload) parseUploadFinishResponse(respBody []byte) (err error) {
err = json.Unmarshal(respBody, &finish)
if err != nil {
// Sometimes the unmarshal fails in which case return the body
return errors.Errorf("upload: bad response: %q", bytes.TrimSpace(respBody))
return fmt.Errorf("upload: bad response: %q", bytes.TrimSpace(respBody))
}
return up.o.checkUploadResponse(up.ctx, &finish)
}
@@ -240,7 +239,7 @@ outer:
// check size read is correct
if eof && err == nil && up.size >= 0 && up.size != offset {
err = errors.Errorf("upload: short read: read %d bytes expected %d", up.size, offset)
err = fmt.Errorf("upload: short read: read %d bytes expected %d", up.size, offset)
}
// read any errors

View File

@@ -3,6 +3,7 @@ package sia
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@@ -11,18 +12,16 @@ import (
"strings"
"time"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/encoder"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/sia/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
@@ -460,7 +459,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.APIPassword != "" {
opt.APIPassword, err = obscure.Reveal(opt.APIPassword)
if err != nil {
return nil, errors.Wrap(err, "couldn't decrypt API password")
return nil, fmt.Errorf("couldn't decrypt API password: %w", err)
}
f.srv.SetUserPass("", opt.APIPassword)
}
@@ -474,7 +473,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
_, err := f.NewObject(ctx, remote)
if err != nil {
if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return old f
f.root = root
return f, nil
@@ -493,7 +492,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
func errorHandler(resp *http.Response) error {
body, err := rest.ReadBody(resp)
if err != nil {
return errors.Wrap(err, "error when trying to read error body")
return fmt.Errorf("error when trying to read error body: %w", err)
}
// Decode error response
errResponse := new(api.Error)

View File

@@ -1,18 +1,18 @@
//go:build !plan9
// +build !plan9
// Package tardigrade provides an interface to Tardigrade decentralized object storage.
package tardigrade
// Package storj provides an interface to Storj decentralized object storage.
package storj
import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -31,16 +31,17 @@ const (
)
var satMap = map[string]string{
"us-central-1.tardigrade.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
"europe-west-1.tardigrade.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
"asia-east-1.tardigrade.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
"us-central-1.storj.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
"europe-west-1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
"asia-east-1.storj.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "tardigrade",
Description: "Tardigrade Decentralized Cloud Storage",
Name: "storj",
Description: "Storj Decentralized Cloud Storage",
Aliases: []string{"tardigrade"},
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
provider, _ := m.Get(fs.ConfigProvider)
@@ -64,12 +65,12 @@ func init() {
access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
if err != nil {
return nil, errors.Wrap(err, "couldn't create access grant")
return nil, fmt.Errorf("couldn't create access grant: %w", err)
}
serializedAccess, err := access.Serialize()
if err != nil {
return nil, errors.Wrap(err, "couldn't serialize access grant")
return nil, fmt.Errorf("couldn't serialize access grant: %w", err)
}
m.Set("satellite_address", satellite)
m.Set("access_grant", serializedAccess)
@@ -78,16 +79,15 @@ func init() {
config.FileDeleteKey(name, "api_key")
config.FileDeleteKey(name, "passphrase")
} else {
return nil, errors.Errorf("invalid provider type: %s", provider)
return nil, fmt.Errorf("invalid provider type: %s", provider)
}
return nil, nil
},
Options: []fs.Option{
{
Name: fs.ConfigProvider,
Help: "Choose an authentication method.",
Required: true,
Default: existingProvider,
Name: fs.ConfigProvider,
Help: "Choose an authentication method.",
Default: existingProvider,
Examples: []fs.OptionExample{{
Value: "existing",
Help: "Use an existing access grant.",
@@ -99,23 +99,21 @@ func init() {
{
Name: "access_grant",
Help: "Access grant.",
Required: false,
Provider: "existing",
},
{
Name: "satellite_address",
Help: "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.",
Required: false,
Provider: newProvider,
Default: "us-central-1.tardigrade.io",
Default: "us-central-1.storj.io",
Examples: []fs.OptionExample{{
Value: "us-central-1.tardigrade.io",
Value: "us-central-1.storj.io",
Help: "US Central 1",
}, {
Value: "europe-west-1.tardigrade.io",
Value: "europe-west-1.storj.io",
Help: "Europe West 1",
}, {
Value: "asia-east-1.tardigrade.io",
Value: "asia-east-1.storj.io",
Help: "Asia East 1",
},
},
@@ -123,13 +121,11 @@ func init() {
{
Name: "api_key",
Help: "API key.",
Required: false,
Provider: newProvider,
},
{
Name: "passphrase",
Help: "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
Required: false,
Provider: newProvider,
},
},
@@ -145,7 +141,7 @@ type Options struct {
Passphrase string `config:"passphrase"`
}
// Fs represents a remote to Tardigrade
// Fs represents a remote to Storj
type Fs struct {
name string // the name of the remote
root string // root of the filesystem
@@ -163,11 +159,12 @@ var (
_ fs.Fs = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
)
// NewFs creates a filesystem backed by Tardigrade.
// NewFs creates a filesystem backed by Storj.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
// Setup filesystem and connection to Tardigrade
// Setup filesystem and connection to Storj
root = norm.NFC.String(root)
root = strings.Trim(root, "/")
@@ -188,24 +185,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
if f.opts.Access != "" {
access, err = uplink.ParseAccess(f.opts.Access)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
return nil, fmt.Errorf("storj: access: %w", err)
}
}
if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
return nil, fmt.Errorf("storj: access: %w", err)
}
serializedAccess, err := access.Serialize()
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
return nil, fmt.Errorf("storj: access: %w", err)
}
err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
return nil, fmt.Errorf("storj: access: %w", err)
}
}
@@ -237,7 +234,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
if bucketName != "" && bucketPath != "" {
_, err = project.StatBucket(ctx, bucketName)
if err != nil {
return f, errors.Wrap(err, "tardigrade: bucket")
return f, fmt.Errorf("storj: bucket: %w", err)
}
object, err := project.StatObject(ctx, bucketName, bucketPath)
@@ -263,7 +260,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
return f, nil
}
// connect opens a connection to Tardigrade.
// connect opens a connection to Storj.
func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
fs.Debugf(f, "connecting...")
defer fs.Debugf(f, "connected: %+v", err)
@@ -274,7 +271,7 @@ func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
project, err = cfg.OpenProject(ctx, f.access)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: project")
return nil, fmt.Errorf("storj: project: %w", err)
}
return
@@ -683,3 +680,43 @@ func newPrefix(prefix string) string {
return prefix + "/"
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Move parameters
srcBucket, srcKey := bucket.Split(srcObj.absolute)
dstBucket, dstKey := f.absolute(remote)
options := uplink.MoveObjectOptions{}
// Do the move
err := f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
if err != nil {
// Make sure destination bucket exists
_, err := f.project.EnsureBucket(ctx, dstBucket)
if err != nil {
return nil, fmt.Errorf("rename object failed to create destination bucket: %w", err)
}
// And try again
err = f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
if err != nil {
return nil, fmt.Errorf("rename object failed: %w", err)
}
}
// Read the new object
return f.NewObject(ctx, remote)
}
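Note the retry shape: the happy path is a single MoveObject call, and only a failed move pays for EnsureBucket plus a second attempt, which keeps the common case cheap. A hedged sketch of driving this through rclone's fs.Mover interface (names illustrative):

func serverSideMove(ctx context.Context, f fs.Fs, srcObj fs.Object) {
	if mover, ok := f.(fs.Mover); ok {
		newObj, err := mover.Move(ctx, srcObj, "new/key")
		if err == fs.ErrorCantMove {
			return // caller falls back to copy + delete
		}
		if err == nil {
			fs.Debugf(newObj, "moved server-side")
		}
	}
}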


@@ -1,15 +1,15 @@
//go:build !plan9
// +build !plan9
package tardigrade
package storj
import (
"context"
"errors"
"io"
"path"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/bucket"
@@ -18,7 +18,7 @@ import (
"storj.io/uplink"
)
// Object describes a Tardigrade object
// Object describes a Storj object
type Object struct {
fs *Fs
@@ -32,7 +32,7 @@ type Object struct {
// Check the interfaces are satisfied.
var _ fs.Object = &Object{}
// newObjectFromUplink creates a new object from a Tardigrade uplink object.
// newObjectFromUplink creates a new object from a Storj uplink object.
func newObjectFromUplink(f *Fs, relative string, object *uplink.Object) *Object {
// Attempt to use the modified time from the metadata. Otherwise
// fallback to the server time.
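A sketch of the fallback this comment describes, assuming uplink's Object exposes System.Created and Custom metadata, and that rclone records the modification time under a custom key — the key name here is an assumption:

func uplinkModTime(object *uplink.Object) time.Time {
	modTime := object.System.Created // server time as the fallback
	if s, ok := object.Custom["rclone:mtime"]; ok {
		if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
			modTime = t // prefer the time recorded at upload
		}
	}
	return modTime
}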


@@ -1,20 +1,20 @@
//go:build !plan9
// +build !plan9
// Test Tardigrade filesystem interface
package tardigrade_test
// Test Storj filesystem interface
package storj_test
import (
"testing"
"github.com/rclone/rclone/backend/tardigrade"
"github.com/rclone/rclone/backend/storj"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestTardigrade:",
NilObject: (*tardigrade.Object)(nil),
RemoteName: "TestStorj:",
NilObject: (*storj.Object)(nil),
})
}


@@ -1,4 +1,4 @@
//go:build plan9
// +build plan9
package tardigrade
package storj


@@ -14,6 +14,7 @@ To work around this we use the remote "TestSugarSync:Test" to test with.
import (
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -25,7 +26,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/sugarsync/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -79,7 +79,7 @@ func init() {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, errors.Wrap(err, "failed to read options")
return nil, fmt.Errorf("failed to read options: %w", err)
}
switch config.State {
@@ -124,7 +124,7 @@ func init() {
// return shouldRetry(ctx, resp, err)
//})
if err != nil {
return nil, errors.Wrap(err, "failed to get token")
return nil, fmt.Errorf("failed to get token: %w", err)
}
opt.RefreshToken = resp.Header.Get("Location")
m.Set("refresh_token", opt.RefreshToken)
@@ -309,7 +309,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, ID string) (info *api.File,
if resp != nil && resp.StatusCode == http.StatusNotFound {
return nil, fs.ErrorObjectNotFound
}
return nil, errors.Wrap(err, "failed to get authorization")
return nil, fmt.Errorf("failed to get authorization: %w", err)
}
return info, nil
}
@@ -343,7 +343,7 @@ func (f *Fs) getAuthToken(ctx context.Context) error {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to get authorization")
return fmt.Errorf("failed to get authorization: %w", err)
}
f.opt.Authorization = resp.Header.Get("Location")
f.authExpiry = authResponse.Expiration
@@ -391,7 +391,7 @@ func (f *Fs) getUser(ctx context.Context) (user *api.User, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get user")
return nil, fmt.Errorf("failed to get user: %w", err)
}
return user, nil
}
@@ -445,7 +445,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if strings.HasSuffix(f.opt.RootID, "/contents") {
f.opt.RootID = f.opt.RootID[:len(f.opt.RootID)-9]
} else {
return nil, errors.Errorf("unexpected rootID %q", f.opt.RootID)
return nil, fmt.Errorf("unexpected rootID %q", f.opt.RootID)
}
// Cache the results
f.m.Set("root_id", f.opt.RootID)
@@ -497,13 +497,13 @@ var findError = regexp.MustCompile(`<h3>(.*?)</h3>`)
func errorHandler(resp *http.Response) (err error) {
body, err := rest.ReadBody(resp)
if err != nil {
return errors.Wrap(err, "error reading error out of body")
return fmt.Errorf("error reading error out of body: %w", err)
}
match := findError.FindSubmatch(body)
if match == nil || len(match) < 2 || len(match[1]) == 0 {
return errors.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
}
return errors.Errorf("HTTP error %v (%v): %s", resp.StatusCode, resp.Status, match[1])
return fmt.Errorf("HTTP error %v (%v): %s", resp.StatusCode, resp.Status, match[1])
}
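An illustrative exercise of the <h3> extraction above, against an invented SugarSync error body:

func demoErrorExtract() {
	body := []byte(`<html><body><h3>Invalid API key</h3></body></html>`)
	if m := findError.FindSubmatch(body); len(m) == 2 {
		fmt.Printf("server said: %s\n", m[1]) // server said: Invalid API key
	}
}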
// rootSlash returns root with a slash on if it is empty, otherwise empty string
@@ -596,7 +596,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
return "", err
}
if !found {
return "", errors.Errorf("couldn't find ID for newly created directory %q", leaf)
return "", fmt.Errorf("couldn't find ID for newly created directory %q", leaf)
}
}
@@ -636,7 +636,7 @@ OUTER:
return shouldRetry(ctx, resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
return found, fmt.Errorf("couldn't list files: %w", err)
}
if fileFn != nil {
for i := range result.Files {
@@ -873,7 +873,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
// Create temporary object
@@ -1247,7 +1247,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if o.id == "" {
o.id, err = o.fs.createFile(ctx, directoryID, leaf, fs.MimeType(ctx, src))
if err != nil {
return errors.Wrap(err, "failed to create file")
return fmt.Errorf("failed to create file: %w", err)
}
if o.id == "" {
return errors.New("failed to create file: no ID")
@@ -1280,7 +1280,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to upload file")
return fmt.Errorf("failed to upload file: %w", err)
}
o.hasMetaData = false


@@ -5,6 +5,7 @@ import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"net/url"
@@ -15,7 +16,6 @@ import (
"github.com/google/uuid"
"github.com/ncw/swift/v2"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -381,7 +381,7 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
if opt.EnvAuth {
err := c.ApplyEnvironment()
if err != nil {
return nil, errors.Wrap(err, "failed to read environment variables")
return nil, fmt.Errorf("failed to read environment variables: %w", err)
}
}
StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
@@ -423,7 +423,7 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -499,7 +499,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "swift: chunk size")
return nil, fmt.Errorf("swift: chunk size: %w", err)
}
c, err := swiftConnection(ctx, opt, name)
@@ -670,7 +670,7 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "container listing failed")
return nil, fmt.Errorf("container listing failed: %w", err)
}
for _, container := range containers {
f.cache.MarkOK(container.Name)
@@ -754,22 +754,34 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var containers []swift.Container
var err error
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(ctx, nil)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "container listing failed")
}
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var total, objects int64
for _, c := range containers {
total += c.Bytes
objects += c.Count
if f.rootContainer != "" {
var container swift.Container
err = f.pacer.Call(func() (bool, error) {
container, _, err = f.c.Container(ctx, f.rootContainer)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("container info failed: %w", err)
}
total = container.Bytes
objects = container.Count
} else {
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(ctx, nil)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("container listing failed: %w", err)
}
for _, c := range containers {
total += c.Bytes
objects += c.Count
}
}
usage := &fs.Usage{
usage = &fs.Usage{
Used: fs.NewUsageValue(total), // bytes in use
Objects: fs.NewUsageValue(objects), // objects in use
}
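The rewritten About asks Swift for just the root container's stats when the remote points inside one, rather than summing every container on the account; only container-less remotes still pay for the full listing. A hedged caller sketch (function name illustrative):

func printUsage(ctx context.Context, f *Fs) error {
	usage, err := f.About(ctx)
	if err != nil {
		return fmt.Errorf("about failed: %w", err)
	}
	fs.Infof(f, "used %d bytes across %d objects", *usage.Used, *usage.Objects)
	return nil
}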


@@ -2,11 +2,12 @@ package union
import (
"context"
"fmt"
"io"
"io/ioutil"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
@@ -82,7 +83,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.Update(ctx, readers[i], src, options...)
errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
if err != nil {
errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
if len(entries) > 1 {
// Drain the input buffer to allow other uploads to continue
_, _ = io.Copy(ioutil.Discard, readers[i])
}
}
} else {
errs[i] = fs.ErrorNotAFile
}
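The added drain matters because the union backend fans one input stream out to every upstream; if a failed branch simply stops reading, the shared producer blocks and stalls the remaining uploads. A minimal sketch of the failure mode and the fix, assuming an io.Pipe style fan-out (names illustrative):

func fanOutDemo(src io.Reader) {
	pr, pw := io.Pipe()
	go func() {
		_, err := io.Copy(pw, src) // shared producer feeding all branches
		pw.CloseWithError(err)
	}()
	// ...an upload reading from pr fails part way through...
	// Draining keeps the producer, and so the other branches, moving:
	_, _ = io.Copy(ioutil.Discard, pr)
}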
@@ -101,7 +108,9 @@ func (o *Object) Remove(ctx context.Context) error {
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.Remove(ctx)
errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
if err != nil {
errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
}
} else {
errs[i] = fs.ErrorNotAFile
}
@@ -120,7 +129,9 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.SetModTime(ctx, t)
errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
if err != nil {
errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
}
} else {
errs[i] = fs.ErrorNotAFile
}


@@ -39,7 +39,7 @@ func (p *EpLus) lus(upstreams []*upstream.Fs) (*upstream.Fs, error) {
}
func (p *EpLus) lusEntries(entries []upstream.Entry) (upstream.Entry, error) {
var minUsedSpace int64
var minUsedSpace int64 = math.MaxInt64
var lusEntry upstream.Entry
for _, e := range entries {
space, err := e.UpstreamFs().GetFreeSpace()
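The one-line change above fixes a real bug: a running minimum initialised to zero can never be beaten, so the policy never selected an upstream. The corrected scan, sketched with illustrative names (GetUsedSpace is assumed to match the upstream API used by this policy family):

func leastUsed(entries []upstream.Entry) upstream.Entry {
	minUsed := int64(math.MaxInt64) // the fix: start above any real value
	var best upstream.Entry
	for _, e := range entries {
		used, err := e.UpstreamFs().GetUsedSpace()
		if err != nil {
			continue // skip upstreams that cannot report usage
		}
		if used < minUsed {
			minUsed, best = used, e
		}
	}
	return best // nil if no upstream could report usage
}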


@@ -2,12 +2,12 @@ package policy
import (
"context"
"fmt"
"math/rand"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
@@ -44,7 +44,7 @@ func registerPolicy(name string, p Policy) {
func Get(name string) (Policy, error) {
p, ok := policies[strings.ToLower(name)]
if !ok {
return nil, errors.Errorf("didn't find policy called %q", name)
return nil, fmt.Errorf("didn't find policy called %q", name)
}
return p, nil
}
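A hedged usage sketch for the registry lookup above, as seen from a caller outside the policy package (names illustrative):

func pickPolicy(name string) (policy.Policy, error) {
	p, err := policy.Get(name) // lookup is case-insensitive
	if err != nil {
		return nil, fmt.Errorf("union: %w", err)
	}
	return p, nil
}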

Some files were not shown because too many files have changed in this diff