mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

1367 Commits

Author SHA1 Message Date
Nick Craig-Wood
77c7077458 Version v1.62.0 2023-03-14 12:42:23 +00:00
Nick Craig-Wood
ffd4ab222c docs: add idrive e2 as a major sponsor 2023-03-14 12:37:34 +00:00
Nick Craig-Wood
676277e255 docs: move FUSE-T docs from auto generated file to source file
Docs were committed in the wrong place in

c0a5283416 docs: rclone mount on macOS with macFUSE and FUSE-T
2023-03-14 12:37:34 +00:00
Justin Winokur
c0a5283416 docs: rclone mount on macOS with macFUSE and FUSE-T 2023-03-13 10:55:39 +00:00
Nick Craig-Wood
e405ca7733 vfs: make uploaded files retain modtime with non-modtime backends
Before this change if a file was uploaded to a backend which didn't
support modtimes, the time of the file read after the upload had
completed would change to the time the file was uploaded on the
backend.

When using `--vfs-cache-mode writes` or `full` this time would be
different by the `--vfs-write-back` delay which would cause
applications to think the file had been modified.

This change uses the last modification time read by the OS as a
virtual modtime for backends which don't support setting modtimes. It
does not change the modtime of the object actually uploaded.

This means that as long as the file remains in the directory cache it
will have the expected modtime.

See: https://forum.rclone.org/t/saving-files-causes-wrong-modified-time-to-be-set-for-a-few-seconds-on-webdav-mount-with-bitrix24/36451
2023-03-10 15:00:01 +00:00
Nick Craig-Wood
580d72f0f6 operations: skip --max-delete tests on chunker integration tests
The recent changes to remove race conditions from --max-delete have
made these tests fail on chunker with s3 because they do copy then
delete and the deletes are being counted in the --max-delete(-size)
counts.
2023-03-10 12:13:44 +00:00
Nick Craig-Wood
22daeaa6f3 build: update dependencies
This fixes the azureblob backend so it builds again after the SDK
changes.

This doesn't update bazil.org/fuse because it doesn't build on FreeBSD

https://github.com/bazil/fuse/issues/295
2023-03-10 11:15:07 +00:00
Nick Craig-Wood
ca9ad7935a Add dependabot[bot] to contributors 2023-03-10 11:15:07 +00:00
Nick Craig-Wood
dd6e229327 move: if --check-first and --order-by are set then delete with perfect ordering
If using rclone move and --check-first and --order-by then rclone uses
the transfer routine to delete files to ensure perfect ordering.

This will cause the transfer stats to have a larger than expected
number of items in it so we don't enable this by default.

Fixes #6033
2023-03-10 08:23:32 +00:00
dependabot[bot]
4edcd16f5f build(deps): bump github.com/gdamore/tcell/v2 from 2.5.4 to 2.6.0
Bumps [github.com/gdamore/tcell/v2](https://github.com/gdamore/tcell) from 2.5.4 to 2.6.0.
- [Release notes](https://github.com/gdamore/tcell/releases)
- [Changelog](https://github.com/gdamore/tcell/blob/main/CHANGESv2.md)
- [Commits](https://github.com/gdamore/tcell/compare/v2.5.4...v2.6.0)

---
updated-dependencies:
- dependency-name: github.com/gdamore/tcell/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:38:47 +00:00
dependabot[bot]
534e3acd06 build(deps): bump github.com/iguanesolutions/go-systemd/v5
Bumps [github.com/iguanesolutions/go-systemd/v5](https://github.com/iguanesolutions/go-systemd) from 5.1.0 to 5.1.1.
- [Release notes](https://github.com/iguanesolutions/go-systemd/releases)
- [Commits](https://github.com/iguanesolutions/go-systemd/compare/v5.1.0...v5.1.1)

---
updated-dependencies:
- dependency-name: github.com/iguanesolutions/go-systemd/v5
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:38:04 +00:00
dependabot[bot]
cf75ddabd3 build(deps): bump golang.org/x/term from 0.5.0 to 0.6.0
Bumps [golang.org/x/term](https://github.com/golang/term) from 0.5.0 to 0.6.0.
- [Release notes](https://github.com/golang/term/releases)
- [Commits](https://github.com/golang/term/compare/v0.5.0...v0.6.0)

---
updated-dependencies:
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:37:23 +00:00
dependabot[bot]
6edcacf932 build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity
Bumps [github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://github.com/Azure/azure-sdk-for-go) from 1.2.0 to 1.2.2.
- [Release notes](https://github.com/Azure/azure-sdk-for-go/releases)
- [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/sdk/azidentity/v1.2.2/CHANGELOG.md)
- [Commits](https://github.com/Azure/azure-sdk-for-go/compare/v1.2...sdk/azidentity/v1.2.2)

---
updated-dependencies:
- dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azidentity
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:36:23 +00:00
dependabot[bot]
51506a7ccd build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azcore
Bumps [github.com/Azure/azure-sdk-for-go/sdk/azcore](https://github.com/Azure/azure-sdk-for-go) from 1.3.0 to 1.4.0.
- [Release notes](https://github.com/Azure/azure-sdk-for-go/releases)
- [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md)
- [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.3.0...sdk/azcore/v1.4.0)

---
updated-dependencies:
- dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azcore
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:33:40 +00:00
Ryan Caezar Itang
a50fd2a2a2 ci: add dependabot 2023-03-09 15:05:15 +00:00
Ryan Caezar Itang
efac7e18fb ci: add winget releaser workflow 2023-03-09 14:56:37 +00:00
Ryan Caezar Itang
02dd8eacea docs: add winget installation method 2023-03-09 14:56:37 +00:00
Nick Craig-Wood
e2984227bb fs: fix race conditions in --max-delete and --max-delete-size 2023-03-09 09:25:31 +00:00
Nick Craig-Wood
a35ee30d9f Add Leandro Sacchet to contributors 2023-03-09 09:25:31 +00:00
Leandro Sacchet
f689db4422 fs: Add --max-delete-size a delete size threshold
Fixes #3329
2023-03-08 17:12:31 +00:00
Nick Craig-Wood
fb4600f6f9 tree: fix display of files with illegal Windows file system names
Before this change, files with illegal Windows names (eg those
containing \) would not be displayed properly in tree.

This change adds the local encoding to the Windows file names so \
will be displayed as its wide unicode equivalent.

See: https://forum.rclone.org/t/error-with-build-v1-61-1-tree-command-panic-runtime-error-invalid-memory-address-or-nil-pointer-dereference/35922/
2023-03-07 15:30:11 +00:00
Nick Craig-Wood
1d0c75b0c2 ftp: retry errors when initiating downloads
This adds a retry loop to the Open() call in the FTP server so it can
retry failures opening files.

This should make downloading multipart files more reliable.

See: https://forum.rclone.org/t/downloads-fail-from-remote-server-error-426-failure-writing-network-stream/33839/
2023-03-07 12:34:20 +00:00
Nick Craig-Wood
2e435af4de ftp: retry 426 errors
Before this change we didn't retry 426 errors which are

    426 Connection closed; transfer aborted.

Or in this particular case

    426 Failure writing network stream.

These seem like they might be temporary so retry them.

See: https://forum.rclone.org/t/downloads-fail-from-remote-server-error-426-failure-writing-network-stream/33839/
2023-03-07 12:34:20 +00:00
Nick Craig-Wood
62a7765e57 smb: allow SPN (service principal name) to be configured
This enables connection to clusters.

Fixes #6515
2023-03-07 12:18:32 +00:00
Nick Craig-Wood
5ad942ed87 local: fix exclusion of dangling symlinks with -L/--copy-links
Before this fix, a dangling symlink was erroring the sync. It was
writing an ERROR log and causing rclone to exit with an error. The
List method wasn't returning an error though.

This fix makes sure that we don't log or report a global error on a
file/directory that has been excluded.

This feature was first implemented in:

a61d219bc local: fix -L/--copy-links with filters missing directories

Then fixed in:

8d1fff9a8 local: obey file filters in listing to fix errors on excluded files

This commit also adds test cases for the failure modes of those commits.

See #6376
2023-03-07 12:15:10 +00:00
Nick Craig-Wood
96609e3d6e ftp: revert to upstream github.com/jlaffaye/ftp now fix is merged
This reverts to using the upstream library now that the patch to fix the
hang when using ExplicitTLS with certain servers has been merged.

Fixes #6426
2023-03-07 12:12:07 +00:00
Nick Craig-Wood
28a8ebce5b vfs: fix rename of directory containing files to be uploaded
Before this change, if you renamed a directory containing files yet to
be uploaded and then deleted the directory, the files would still be
uploaded.

This fixes the problem by changing the directory path in all the file
objects in a directory when it is renamed. This wasn't necessary until
we introduced virtual files and directories which lived beyond the
directory flush mechanism.

Fixes #6809
2023-03-07 11:40:50 +00:00
Nick Craig-Wood
17854663de vfs: log size of File and Dir in tests for optimization 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
a4a6b5930a Add Peter Brunner to contributors 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
e9ae620844 Add Ryan Caezar Itang to contributors 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
e7cfb8ad8e Add Ninh Pham to contributors 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
786a1c212c Add Peter Brunner to contributors 2023-03-07 11:40:50 +00:00
Peter Brunner
03bc270730 gcs: fix google cloud storage provider help 2023-03-07 11:39:02 +00:00
Ryan Caezar Itang
7cef042231 docs: add scoop installation method 2023-03-07 11:36:07 +00:00
Ninh Pham
1155cc0d3f drive: make --drive-stop-on-upload-limit respond to storageQuotaExceeded
Before this change, if a "--drive-stop-on-upload-limit" was set,
rclone would not stop the upload if a "storageQuotaExceeded" error occurred.

This fix now checks for the "storageQuotaExceeded" error
and "--drive-stop-on-upload-limit", and fails fast.
2023-03-07 11:00:08 +00:00
Peter Brunner
13c3f67ab0 gcs: add env_auth to pick up IAM credentials from env/instance
This change provides the ability to pass `env_auth` as a parameter to
the google cloud storage provider. This enables the provider to pull IAM
credentials from the environment or instance metadata. Previously if no
auth method was given it would default to requesting oauth.
2023-03-06 18:18:33 +00:00
Nick Craig-Wood
ab2cdd840f serve ftp: fix timestamps older than 1 year in listings
Fixes #6785
2023-03-06 15:59:56 +00:00
Nick Craig-Wood
143285e2b7 vfs: fix incorrect modtime on fs which don't support setting modtime
Before this change we were using the Precision literally to round the
precision of the mod times.

However fs.ModTimeNotSupported is 100y on backends which don't support
setting modtimes so rounding to 100y was producing very strange
results.

See: https://forum.rclone.org/t/saving-files-causes-wrong-modified-time-to-be-set-for-a-few-seconds-on-webdav-mount-with-bitrix24/36451/
2023-03-06 10:54:21 +00:00
Nick Craig-Wood
19e8c8d42a s3: make purge remove directory markers too
See: https://forum.rclone.org/t/cannot-purge-aws-s3/36169/
2023-03-03 15:51:00 +00:00
Nick Craig-Wood
de9c4a3611 s3: use bucket.Join instead of path.Join to preserve paths
Before this change, path.Join would remove the trailing / from objects
which had them. The simplified bucket.Join does not.
2023-03-03 15:51:00 +00:00
Nick Craig-Wood
d7ad13d929 bucket: add Join function for a simplified path.Join 2023-03-03 15:51:00 +00:00
albertony
f9d50f677d lib/terminal: enable windows console virtual terminal sequences processing (ANSI/VT100 colors)
This ensures the virtual terminal processing mode is enabled on the rclone process
for Windows 10 consoles (by using Windows Console API functions GetConsoleMode/SetConsoleMode
and flag ENABLE_VIRTUAL_TERMINAL_PROCESSING), which adds native support for ANSI/VT100
escape sequences. This mode is the default in many cases, e.g. when using the Windows
Terminal application, but in other cases it is not, and the default can also be
controlled with a registry setting (see below). Configuring it on the process
therefore seems to be the only reliable way of ensuring it is enabled when supported.

[HKEY_CURRENT_USER\Console]
"VirtualTerminalLevel"=dword:00000001
2023-03-03 12:37:01 +01:00
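For illustration, a minimal Go sketch of the Console API calls named above (GetConsoleMode/SetConsoleMode with ENABLE_VIRTUAL_TERMINAL_PROCESSING), using golang.org/x/sys/windows; rclone's lib/terminal does this with more care about which handles it touches.

    //go:build windows

    package main

    import (
        "os"

        "golang.org/x/sys/windows"
    )

    // enableVT turns on virtual terminal processing for stdout so ANSI/VT100
    // escape sequences are interpreted natively by the Windows console.
    func enableVT() error {
        handle := windows.Handle(os.Stdout.Fd())
        var mode uint32
        if err := windows.GetConsoleMode(handle, &mode); err != nil {
            return err // stdout is not a console, e.g. redirected to a file
        }
        return windows.SetConsoleMode(handle, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
    }

    func main() {
        if err := enableVT(); err != nil {
            os.Stderr.WriteString("VT processing not enabled: " + err.Error() + "\n")
        }
    }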
albertony
3641993fab tree: fix colored output on windows
Since rclone version 1.61.0 the tree command uses ANSI color sequences in output by
default, but this led to issues in Windows terminals that were not handling these (#6668).

This commit ensures the tree command uses the terminal package for output. It relies on
go-colorable to properly handle ANSI color sequences: if stdout is connected to a terminal
the escape sequences are decoded and the text is written with color formatting using the
Windows Console API. If stdout is not connected to a terminal, e.g. redirected to a file,
the escape sequences are stripped off. The tree command has its own method for writing
directly to a file, specified with the flag --output, in which case the output is not
passed through the terminal package and must therefore be written without ANSI codes.
2023-03-03 12:37:01 +01:00
Nick Craig-Wood
93d3ae04c7 deletefile: return error code 4 if file does not exist
Before this change `rclone deletefile` would return error code 1 if
the file it was trying to delete does not exist.

Rclone can't actually tell at this point whether the file doesn't
exist or whether what you tried to delete is a directory, but it seems
more logical to return error code 4 "object not found" here.

See: https://forum.rclone.org/t/rclone-deletefile-cmd-return-exit-code-1-when-file-not-found-in-remote-why-1-and-not-exit-code-4/
2023-03-03 09:51:23 +00:00
Nick Craig-Wood
e25e9fbf22 Add NodudeWasTaken to contributors 2023-03-03 09:51:23 +00:00
NodudeWasTaken
fe26d6116d mega: add --mega-use-https flag
Some ISPs throttle HTTP which MEGA uses by default, so some users may find using HTTPS beneficial.
2023-03-02 20:28:10 +00:00
Fred
06e1e18793 seafile: fix for flaky tests #6799 2023-03-02 20:03:25 +00:00
Nick Craig-Wood
23d17b76be onedrive: default onedrive personal to QuickXorHash
Before this change the hash used for Onedrive Personal was SHA1. From
July 2023 Microsoft is phasing out SHA1 hashes in favour of
QuickXorHash in Onedrive Personal. Onedrive Business and Sharepoint
continue to use QuickXorHash as before.

This choice can be changed using the --onedrive-hash-type flag (and
config option) so that SHA1 can be selected while it is still
available in the transition period.

See: https://forum.rclone.org/t/microsoft-is-switching-onedrive-personal-to-quickxorhash-from-sha1/36296/
2023-03-02 19:32:35 +00:00
Nick Craig-Wood
dfe4e78a77 onedrive: add --onedrive-hash-type to change the hash in use
In preparation for Microsoft removing the SHA1 hash on OneDrive
Personal this allows the hash type to be set on OneDrive.

See: https://forum.rclone.org/t/microsoft-is-switching-onedrive-personal-to-quickxorhash-from-sha1/36296/
2023-03-02 19:32:35 +00:00
Nick Craig-Wood
59e7982040 s3: add --s3-sts-endpoint to specify STS endpoint
See: https://forum.rclone.org/t/s3-profile-failing-when-explicit-s3-endpoint-is-present/36063/
2023-03-02 09:56:09 +00:00
Nick Craig-Wood
c6b0587dc0 s3: fix AWS STS failing if --s3-endpoint is set
Before this change if an --s3-profile was set which used AWS STS (eg
to assume a role) and --s3-endpoint was set then rclone would use the
value from --s3-endpoint to contact the STS server which did not work.

This fix implements an endpoint resolver which only overrides the "s3"
service if --s3-endpoint is set. It sends the "sts" service (and any
other service) to the default resolver.

Fixes #6443
See: https://forum.rclone.org/t/s3-profile-failing-when-explicit-s3-endpoint-is-present/36063/
2023-03-01 16:24:40 +00:00
Nick Craig-Wood
9baa4d1c3c accounting: show checking tag if available even on transfers 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
a5390dbbeb sync,operations: fix correct concurrency: use --checkers unless transferring files
There were some places (e.g. deleting files) where we were using
--transfers instead of --checkers to control the concurrency when
files weren't being transferred.

These have been updated to use --checkers.
2023-03-01 11:10:38 +00:00
Nick Craig-Wood
019a486d5b accounting: Make checkers show what they are doing
Before this change, all types of checkers showed "checking" after the
file name despite the fact that not all of them were checking.

After this change, they can show

- checking
- deleting
- hashing
- importing
- listing
- merging
- moving
- renaming

See: https://forum.rclone.org/t/what-is-rclone-checking-during-a-purge/35931/
2023-03-01 11:10:38 +00:00
Nick Craig-Wood
34ce11d2be Add ToBeFree to contributors 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
88e8ede0aa Add Gerard Bosch to contributors 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
f6f250c507 Add logopk to contributors 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
2c45e901f0 Add Hunter Wittenborn to contributors 2023-03-01 11:10:38 +00:00
ToBeFree
9e1443799a docs: crypt: fix typo 2023-02-28 11:50:53 +00:00
Gerard Bosch
dd72aff98a docs: bisync: clarification of --resync 2023-02-28 11:47:28 +00:00
logopk
5039f9be48 docker: fix volume plugin does not remount volume on docker restart
docker volume plugin restoreState: skip fs option if empty

Fixes #6769
Co-authored-by: Peter Kreuser <logo@kreuser.name>
2023-02-28 11:29:07 +00:00
Hunter Wittenborn
56b582cdb9 authorize: add support for custom templates
This adds support for providing custom Go templates for use in the
`rclone authorize` command.

Fixes #6741
2023-02-24 15:08:38 +00:00
Aaron Gokaslan
745c0af571 all: Apply codeql fixes 2023-02-23 10:31:51 +00:00
Nick Craig-Wood
2dabbe83ac serve http: tests for --auth-proxy 2023-02-23 10:28:13 +00:00
Nick Craig-Wood
90561176fb Add Matthias Baur to contributors 2023-02-23 10:28:13 +00:00
Matthias Baur
a0b5d77427 serve http: support --auth-proxy 2023-02-22 14:55:24 +00:00
Manoj Ghosh
ce8b1cd861 oracle-object-storage: bring your own encryption keys 2023-02-21 14:45:02 +00:00
Manoj Ghosh
5bd6e3d1e9 fix vulnerabilities: upgrade golang.org/x/net@v0.5.0 to golang.org/x/net@v0.7.0 2023-02-21 10:11:16 +00:00
Nick Craig-Wood
d4d7a6a55e sftp: fix uploads being 65% slower than they should be with crypt
The block size for crypt is 64k + a few bytes. The default block size
for sftp is 32k. This means that the blocks for crypt get split over 3
sftp packets: two of 32k and one of a few bytes.

However due to a bug in pkg/sftp it was sending 32k instead of just a
few bytes, leading to the 65% slowdown.

This was fixed in the upstream library.

This bug probably also affected transfers from over-the-network
sources.

Fixes #6763
See: https://github.com/pkg/sftp/pull/537
2023-02-14 15:47:19 +00:00
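A worked example of the block arithmetic described above; the 16-byte figure for the crypt per-block overhead is illustrative, the commit only says "a few bytes".

    package main

    import "fmt"

    func main() {
        const cryptBlock = 64*1024 + 16 // 64k of data plus a few bytes of overhead (illustrative)
        const sftpPacket = 32 * 1024    // default sftp block size
        full := cryptBlock / sftpPacket
        rest := cryptBlock % sftpPacket
        fmt.Printf("%d full packets of %d bytes, plus one of %d bytes\n", full, sftpPacket, rest)
        // 2 full packets of 32768 bytes, plus one of 16 bytes
    }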
Nick Craig-Wood
b3e0672535 s3: Check multipart upload ETag when --s3-no-head is in use
Before this change if --s3-no-head was in use rclone didn't check the
multipart upload ETag at all. However the ETag is returned in the
final POST request when completing the object.

This change uses that ETag from the final POST if --s3-no-head is in
use, otherwise it uses the ETag from a fresh HEAD request.

See: https://forum.rclone.org/t/in-some-cases-rclone-does-not-use-etag-to-verify-files/36095/
2023-02-14 12:04:28 +00:00
Nick Craig-Wood
a407437e92 Add Simmon Li (he/him) to contributors 2023-02-14 12:04:28 +00:00
Manoj Ghosh
0164a4e686 add more documentation around oci authentication ways 2023-02-14 11:58:38 +00:00
Simmon Li (he/him)
b8ea79042c docs: drive: make clear "testing" apps have short token grant time 2023-02-13 14:30:20 +00:00
albertony
49a6533bc1 docs/mount: improve explanation of windows filesystem permissions 2023-02-10 23:21:33 +01:00
Nick Craig-Wood
21459f3cc0 tree: fix nil pointer exception on stat failure
This fixes the crash by updating the upstream.

See: https://forum.rclone.org/t/error-with-build-v1-61-1-tree-command-panic-runtime-error-invalid-memory-address-or-nil-pointer-dereference/35922/
See: https://github.com/a8m/tree/pull/21
2023-02-08 16:21:25 +00:00
albertony
04f7e52803 accounting: show human readable elapsed time when longer than a day - fixes #6748 2023-02-06 15:02:03 +01:00
Kaloyan Raev
25535e5eac storj: update satellite urls and labels
The docs and setup wizard still contained deprecated URLs and labels of
Storj satellites. This change updates them.
2023-02-06 13:18:15 +00:00
Nick Craig-Wood
c37b6b1a43 cache: fix lint error in latest golangci-lint 2023-02-06 10:44:40 +00:00
albertony
0328878e46 accounting: limit length of ETA string
No need to report hours, minutes, and even seconds when the
ETA is several years, e.g. "292y24w3d23h47m16s". Now only
reports the 3 most significant units, sacrificing precision,
e.g. "292y24w3d", "24w3d23h", "3d23h47m", "23h47m16s".

Fixes #6381
2023-02-04 17:29:08 +01:00
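A minimal Go sketch of the truncation rule described above, operating on an already formatted duration string; limitETA is a hypothetical helper for illustration, not the actual accounting code, which formats the ETA directly.

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    // limitETA keeps only the maxUnits most significant components of a
    // formatted duration such as "292y24w3d23h47m16s".
    func limitETA(eta string, maxUnits int) string {
        parts := regexp.MustCompile(`[0-9]+[a-zµ]+`).FindAllString(eta, -1)
        if len(parts) > maxUnits {
            parts = parts[:maxUnits]
        }
        return strings.Join(parts, "")
    }

    func main() {
        fmt.Println(limitETA("292y24w3d23h47m16s", 3)) // 292y24w3d
        fmt.Println(limitETA("23h47m16s", 3))          // 23h47m16s
    }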
albertony
67132ecaec accounting: avoid negative ETA values for very slow speeds
Integer overflow would lead to ETA such as "-255y7w4h11m22s966ms",
as reported in #6381. Now the value will be clipped at the maximum
"292y24w3d23h47m16s", and it will be shown as infinity.
2023-02-04 17:29:08 +01:00
albertony
120cfcde70 install.sh: fix arm-v6 download 2023-02-04 13:32:26 +01:00
albertony
37db2a0e44 selfupdate: consider arm version 2023-02-04 13:32:26 +01:00
albertony
f92816899c version: report arm version 2023-02-04 13:32:26 +01:00
albertony
5386ffc8f2 build: correct building for ARMv5 and ARMv6
Explicitly set ARM version in GOARM build variable, to avoid relying
on some default value which differs when compiling natively and when
cross-compiling, and which is also incorrectly documented as being
6 when in reality it is 5.

Fix incorrect labelling of ARMv5 builds as ARMv6, and change
architecture of .rpm and .deb packages containing them to
match.

Add ARMv6 builds, to complement existing ARMv5 and ARMv7, and to
reduce disruption due to previous ARMv5 builds incorrectly being
identified as ARMv6, and to provide .rpm and .deb packages with the
same ARMv6 architectures as was previously also published
(then containing ARMv5 binaries).

See #6528

Background info:

https://github.com/golang/go/wiki/GoArm
https://go.dev/doc/install/source#environment
661e931dd1/src/cmd/dist/build.go (L140-L144)
661e931dd1/src/cmd/dist/util.go (L392-L422)
2023-02-04 13:32:26 +01:00
Anagh Kumar Baranwal
3898d534f3 build: update to go1.20
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-02-03 20:15:15 +00:00
Ole Frost
34333d9fa8 docs: added troubleshooting tips for Live Photos in OneDrive 2023-02-03 16:24:30 +00:00
Ole Frost
14e852ee9d s3: fix incorrect tier support for StorJ and IDrive when pointing at a file
Fixes #6734
2023-02-02 18:12:00 +00:00
albertony
37623732c6 build: avoid running workflow twice for pull requests with branch on main repo 2023-02-01 16:47:38 +01:00
Nick Craig-Wood
adbcc83fa5 filter: emit INFO message when can't work out directory filters
See: https://forum.rclone.org/t/rclone-scans-unwanted-folder/34437
2023-02-01 14:21:45 +00:00
Nick Craig-Wood
d4ea6632ca drive: note that --drive-acknowledge-abuse needs SA Manager permission
See: https://github.com/rclone/rclone/issues/2338#issuecomment-762820600
See: https://forum.rclone.org/t/bisync-already-add-drive-acknowledge-abuse-still-got-critical-error-cannotdownloadabusivefile/35604/
2023-02-01 12:11:46 +00:00
Nick Craig-Wood
21849fd0d9 webdav: fix interop with davrods server
The davrods server returns URLs with a double / in them, and the //
confuses rclone into thinking these files are in a directory called "".

The fix removes leading /s from the directory listing names.

See: https://forum.rclone.org/t/upload-to-webdav-does-not-check-if-files-already-exist/35756/
2023-02-01 12:00:25 +00:00
Nick Craig-Wood
ac20ee41ca Add happyxhw to contributors 2023-02-01 12:00:25 +00:00
happyxhw
d376fb1df2 smb: check smb connection is closed - fixes #6735 2023-02-01 08:25:25 +01:00
Nick Craig-Wood
8e63a08d7f docs: note that we have test Android builds 2023-01-31 14:11:50 +00:00
Nick Craig-Wood
3aee5b3c55 Add Simmon Li (he/him) to contributors 2023-01-31 14:11:50 +00:00
Nick Craig-Wood
0145d98314 Add LXY to contributors 2023-01-31 14:11:50 +00:00
Nick Craig-Wood
4c03c71a5f Add Bryan Kaplan to contributors 2023-01-31 14:11:50 +00:00
Simmon Li (he/him)
82e2801aae update drive.md
* Updates OAuth consent screen instructions to include adding scopes for backup purposes (create, edit and delete files).
* Updates instructions to keep app in testing mode (appropriate for most users). The previous instructions suggested this, but we don't need to "publish" the app at all in order to proceed with this step.
2023-01-27 15:25:17 +00:00
LXY
dc5d5de35c onedrive: improve speed of quickxorhash
This commit ports a fast C implementation from https://github.com/namazso/QuickXorHash

It uses new crypto/subtle code from go1.20 to avoid the use of unsafe.

Typical speedups are about 25x when using go1.20

    goos: linux
    goarch: amd64
    cpu: Intel(R) Celeron(R) N5105 @ 2.00GHz
    QuickXorHash-Before  2.49ms   422MB/s ±11%   100.00%
    QuickXorHash-Subtle  87.9µs 11932MB/s ± 5% +2730.83% + 42.17%

Co-Author: @namazso
2023-01-26 11:50:12 +00:00
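The go1.20 crypto/subtle addition referred to is presumably subtle.XORBytes; below is a minimal sketch of XOR-folding a block into an accumulator with it, as an illustration of the primitive only, not the quickxorhash algorithm (which also rotates bits between blocks).

    package main

    import (
        "crypto/subtle"
        "fmt"
    )

    // xorFold XORs src into acc in place without unsafe pointer tricks.
    func xorFold(acc, src []byte) {
        n := len(src)
        if n > len(acc) {
            n = len(acc)
        }
        subtle.XORBytes(acc[:n], acc[:n], src[:n]) // exact overlap of dst and x is allowed
    }

    func main() {
        acc := make([]byte, 8)
        xorFold(acc, []byte("hello world"))
        fmt.Printf("%x\n", acc)
    }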
Bryan Kaplan
41cc4530f3 docs: Improve bisync check-access & check-filename
This commit documents my learnings after having encountered a failure
I reported in the rclone forum[0].

I may be a fool for having failed to understand the previous
documentation, but I am likely not the only fool to get snared by it.

This commit therefore adds details to clarify what the user must do in
order to allow `--check-access` to succeed.

While at it, I've also added some basic documentation for `--check-filename`.

[0]: https://forum.rclone.org/t/bisync-check-file-check-failed/35682
2023-01-26 11:10:01 +00:00
albertony
c5acb10151 fspath: allow the symbols at and plus in remote names - fixes #6710 2023-01-25 13:37:24 +01:00
Manoj Ghosh
8c8ee9905c oracleobjectstorage: speed up operations by using S3 pacer and setting minsleep to 10ms
Uploading 100 files of 1 MB each took 20 seconds before; with the above fix it takes around 2 seconds now.

This 10x time improvement is in line with the pacer's sleep reduction from 100ms to 10ms.
2023-01-25 10:48:16 +00:00
albertony
e2afd00118 mount: avoid incorrect or premature overlap check on windows
See: #6234
2023-01-24 22:27:02 +01:00
albertony
5b82576dbf build: fix condition for manual workflow run
See #5275
2023-01-24 20:46:33 +01:00
albertony
b9d9f9edb0 docs: use --interactive instead of -i in examples to avoid confusion 2023-01-24 20:43:51 +01:00
Bryan Kaplan
c40b706186 docs: Fix link in bisync doc
This commit fixes the `#check-access` anchor link in the bisync.md document.

`#check-access-option` does not exist in bisync.md; `#check-access` does.
2023-01-24 09:16:43 +01:00
Nick Craig-Wood
351fc609b1 b2: fix uploading files bigger than 1TiB
Before this change when uploading files bigger than 1TiB, the chunk
calculator would work out that the chunk size needed to be bigger than
the default 100 MiB to fit within the 10,000 parts limit.

However the uploader was still using the memory pool for the old chunk
size and this caused errors like

    panic: runtime error: slice bounds out of range [:122683392] with capacity 100663296

The fix for this is to make a temporary pool with the larger chunk
size and use it during the upload of the large file.

See: https://forum.rclone.org/t/rclone-cannot-complete-upload-to-b2-restarts-upload-frequently/35617/
2023-01-22 12:46:23 +00:00
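The arithmetic the chunk calculator performs looks roughly like this sketch; the rounding rules are illustrative, the real b2 backend has its own.

    package main

    import "fmt"

    const MiB = 1 << 20

    // minChunkSize returns the smallest chunk size, rounded up to a whole MiB,
    // that fits fileSize into at most maxParts parts.
    func minChunkSize(fileSize, maxParts, defaultChunk int64) int64 {
        need := (fileSize + maxParts - 1) / maxParts // ceiling division
        if need <= defaultChunk {
            return defaultChunk
        }
        return ((need + MiB - 1) / MiB) * MiB
    }

    func main() {
        const TiB = 1 << 40
        fmt.Println(minChunkSize(2*TiB, 10000, 100*MiB)/MiB, "MiB") // 210 MiB
    }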
Nick Craig-Wood
a6f6a9dcdf mount,mount2,cmount: fix --allow-non-empty #3562
Since version 3 of FUSE, libfuse no longer does anything when given the
nonempty option, and its default is to allow mounting over non-empty
directories like normal mount does.

Some versions of libfuse give an error when using `--allow-non-empty`
which is annoying for the user.

We now do this check ourselves so we no longer need to pass the option
to libfuse.

Fixes #3562
2023-01-20 15:39:54 +00:00
Nick Craig-Wood
267a09001d mount: fix check for empty mount point on Linux #3562 2023-01-20 15:39:54 +00:00
Nick Craig-Wood
37db2abecd Add alankrit to contributors 2023-01-20 15:39:49 +00:00
albertony
0272d44192 mount: do not treat \\?\ prefixed paths as network share paths on windows
See: #6234
2023-01-20 15:40:03 +01:00
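A sketch of the distinction the commit title describes; illustrative only, the real mount code handles more path forms.

    package main

    import (
        "fmt"
        "strings"
    )

    // isNetworkSharePath reports whether a path looks like a UNC network share.
    // Paths with the Windows extended-length prefix \\?\ also start with two
    // backslashes but refer to local paths, so they must be excluded.
    func isNetworkSharePath(path string) bool {
        return strings.HasPrefix(path, `\\`) && !strings.HasPrefix(path, `\\?\`)
    }

    func main() {
        fmt.Println(isNetworkSharePath(`\\server\share`))      // true
        fmt.Println(isNetworkSharePath(`\\?\D:\rclone\mount`)) // false
    }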
alankrit
6b17044f8e fs: add multiple CA certificate support 2023-01-17 12:16:11 +00:00
Nick Craig-Wood
844e8fb8bd lib/errors: add support for unwrapping go1.20 multi errors 2023-01-17 11:35:19 +00:00
Nick Craig-Wood
ca9182d6ae Add IMTheNachoMan to contributors 2023-01-17 11:35:19 +00:00
IMTheNachoMan
ec20c48523 googlephotos: fix grammar in docs (#6699) 2023-01-16 13:40:30 +01:00
Nick Craig-Wood
ec68b72387 lib/file: fix error message test after go1.20 upgrade 2023-01-16 11:19:16 +00:00
Nick Craig-Wood
2d1c2725e4 webdav: fix tests after go1.20 upgrade
Before this change we were sending webdav requests to the go http
FileServer. In go1.20 these (rightly) started returning errors which
caused the tests to fail.

The test has been changed to properly mock up an About query and
response so an end to end test of adding headers is possible.
2023-01-16 11:19:16 +00:00
Nick Craig-Wood
1680c5af8f build: update to go1.20rc3 and make go1.17 the minimum required version 2023-01-16 11:19:16 +00:00
Nick Craig-Wood
88c0d78639 build: update to fuse3 after bazil.org/fuse update 2023-01-16 11:19:16 +00:00
Nick Craig-Wood
559157cb58 azureblob: remove workarounds for SDK bugs after v0.6.1 update 2023-01-16 11:19:16 +00:00
Nick Craig-Wood
10bf8a769e build: update dependencies
This fixes the azureblob backend so it builds again after the SDK
changes.
2023-01-16 11:19:16 +00:00
Fred
f31ab6d178 seafile: renew library password - fixes #6662
Passwords for encrypted libraries are kept in memory in the server
and flushed after an hour.
This MR fixes an issue when the library password expires after 1 hour.
2023-01-15 16:26:29 +00:00
Kaloyan Raev
f08bb5bf66 storj: implement purge 2023-01-15 16:23:49 +00:00
Manoj Ghosh
e2886aaddf oracle-object-storage: expose the storage_tier option in config 2023-01-15 16:20:55 +00:00
albertony
71227986db docs: remove link to nonexistent uploadfile command - fixes #6693 2023-01-12 20:13:02 +01:00
Nick Craig-Wood
8c6ff1fa7e cmount: fix creating and renaming files on case insensitive backends
Before this fix, we told cgofuse/WinFSP that the backend was case
insensitive but didn't implement the Getpath backend function to
return the normalised case of a file.

Recently cgofuse started implementing case insensitive files properly
but since we hadn't implemented Getpath, the file names were taking
the default of all in UPPER CASE.

This patch implements Getpath for cgofuse which fixes the case
problems.

This problem came to light when we upgraded cgofuse and WinFSP (to
1.12) which had the code to implement Getpath.

Fixes #6682
2023-01-11 17:21:57 +00:00
Nick Craig-Wood
9d1b786a39 Add Kaloyan Raev to contributors 2023-01-11 17:21:57 +00:00
Nick Craig-Wood
8ee0e2efb1 Add piyushgarg to contributors 2023-01-11 17:21:57 +00:00
Alex Chen
d66f5e8db0 lib/oauthutil: handle fatal errors better
PR #6678
2023-01-12 00:50:14 +08:00
Ole Frost
02d6d28ec4 crypt: fix for unencrypted directory names on case insensitive remotes
rclone sync erroneously deleted folders renamed to a different case on
crypts where directory name encryption was disabled and the underlying
remote was case insensitive.

Example: Renaming the folder Test to tEST before a sync to a crypt having
remote=OneDrive:crypt and directory_name_encryption=false could result in
the folder and all its content being deleted. The following sync would
correctly create the tEST folder and upload all of the content.

Additional tests have revealed other potential issues when using
filename_encryption=off or directory_name_encryption=false on case
insensitive remotes. The documentation has been updated to warn about
potential problems when using these combinations.
2023-01-11 16:32:40 +00:00
Kaloyan Raev
1cafc12e8c storj: implement public link 2023-01-10 17:40:04 +00:00
piyushgarg
98fa93f6d1 webdav: Document Mapping/Accessing WebDAV shares on windows.
Fixes #6596

Co-authored-by: Piyush <piyushgarg80>
2022-12-30 11:22:46 +00:00
albertony
c6c67a29eb Add Marks Polakovs to contributors 2022-12-26 18:39:49 +01:00
Marks Polakovs
ad5395e953 backend/local: fix %!w(<nil>) in "failed to read directory" error 2022-12-26 18:37:32 +01:00
Nick Craig-Wood
1925ceaade Changelog updates from Version v1.61.1 2022-12-23 18:26:56 +00:00
Nick Craig-Wood
8aebf12797 docs: fix unescaped HTML 2022-12-23 16:53:43 +00:00
Nick Craig-Wood
ffeefe8a56 crypt: obey --ignore-checksum
Before this change the crypt backend would calculate and check upload
checksums regardless of the setting of --ignore-checksum.
2022-12-23 16:52:19 +00:00
Nick Craig-Wood
81ce5e4961 docs: correct RELEASE procedure for stable branch 2022-12-23 12:34:04 +00:00
Nick Craig-Wood
638058ef91 lib/http: shutdown all servers on exit to remove unix socket
Before this change only serve http was shutting down its server, which
was causing other servers such as serve restic to leave behind their
unix sockets.

This change moves the finalisation to lib/http so all servers have it
and removes it from serve http.

Fixes #6648
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
b1b62f70d3 serve webdav: fix running duplicate Serve call
Before this change we were starting the server twice for webdav which
is inefficient and causes problems at exit.
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
823d89af9a serve restic: don't serve via http if serving via --stdio
Before this change, we started the http listener even if --stdio was
supplied.

This also moves the log message so the user won't see the serving via
HTTP message unless they are really using that.

Fixes #6646
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
448fff9a04 serve restic: fix immediate exit when not using stdio
In the lib/http refactor

    52443c2444 restic: refactor to use lib/http

We forgot to serve the data and wait for the server to finish. This is
not tested in the unit tests as it is part of the command line
handler.

Fixes #6644 Fixes #6647
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
6257a6035c serve webdav: fix --baseurl handling after lib/http refactor
The webdav library was confused by the Path manipulation done by
lib/http when stripping the prefix.

This patch adds the prefix back before calling it.

Fixes #6650
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
54c0f17f2a azureblob: fix "409 Public access is not permitted on this storage account"
This error was caused by rclone supplying an empty
`x-ms-blob-public-access:` header when creating a container for
private access, rather than omitting it completely.

This is a valid way of specifying containers should be private, but if
the storage account has the flag "Blob public access" unset then it
gives "409 Public access is not permitted on this storage account".

This patch fixes the problem by only supplying the header if the
access is set.

Fixes #6645
2022-12-23 12:28:07 +00:00
Kaloyan Raev
d049cbb59e s3/storj: update endpoints
Storj switched to a single global s3 endpoint backed by BGP routing.
We want to stop advertising the former regional endpoints and have the
global one as the only option.
2022-12-22 15:46:49 +00:00
Anagh Kumar Baranwal
00e853144e rc: set url to the first value of rc-addr since it has been converted to an array of strings now -- fixes #6641
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2022-12-22 09:02:20 +00:00
albertony
5ac8cfee56 docs: show only significant parts of version number in version introduced label 2022-12-21 12:41:47 +00:00
Nick Craig-Wood
496ae8adf6 Start v1.62.0-DEV development 2022-12-20 18:33:59 +00:00
Nick Craig-Wood
2001cc0831 Version v1.61.0 2022-12-20 17:16:14 +00:00
Ole Frost
a35490bf70 docs: Added note on Box API rate limits 2022-12-20 12:49:31 +00:00
Nick Craig-Wood
01877e5a0f s3: ignore versionIDs from uploads unless using --s3-versions or --s3-version-at
Before this change, when a new object was created s3 returned its
versionID (on a versioned bucket) and rclone recorded it in the object.
object.

This means that when rclone came to delete the object it would delete
it with the versionID.

However it is common to forbid actions with versionIDs on buckets so
as to preserve the historical record and these operations would fail
whereas they succeeded in pre-v1.60.0 versions.

This patch fixes the problem by not recording versions of objects
supplied by the S3 API on upload unless `--s3-versions` or
`--s3-version-at` is used. This makes rclone behave as it did before
v1.60.0 when version support was introduced.

See: https://forum.rclone.org/t/s3-and-intermittent-403-errors-with-file-renames-and-drag-and-drop-operations-in-windows-explorer/34773
2022-12-17 10:24:56 +00:00
Nick Craig-Wood
614d79121a serve dlna: fix panic: Logger uninitialized.
Before this change we forgot to initialize the logger for the dlna
server. This meant when it needed to log something, it panicked
instead.

See: https://forum.rclone.org/t/rclone-serve-dlna-after-few-hours-of-idle-running-panic-logger-uninitialized-names/34835
2022-12-17 10:23:58 +00:00
Nick Craig-Wood
3a6f1f5cd7 filter: add metadata filters --metadata-include/exclude/filter and friends
Fixes #6353
2022-12-17 10:21:11 +00:00
Nick Craig-Wood
4a31961c4f filter: factor rules into its own file 2022-12-16 17:05:31 +00:00
Abdullah Saglam
7be9855a70 azureblob: implement --use-server-modtime
This patch implements --use-server-modtime for the Azureblob backend.

It does this by not reading the time from the metadata if the global
flag is set.
2022-12-15 15:58:36 +00:00
Nick Craig-Wood
6f8112ff67 Add Abdullah Saglam to contributors 2022-12-15 15:58:36 +00:00
Nick Craig-Wood
67fc227684 config: add config/setpath for setting config path via rc/librclone 2022-12-15 12:41:30 +00:00
Nick Craig-Wood
7edb4c0162 sftp: fix NewObject with leading /
This was breaking the use of operations/stat with remotes with an
initial /

See: https://forum.rclone.org/t/rclone-rc-api-operations-stat-is-not-working-for-sftp-remotes/34560
2022-12-15 12:40:59 +00:00
Nick Craig-Wood
5db4493557 lib/http: fix race condition 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
a85c0b0cc2 cmd/serve/httplib: remove as it is now replaced by lib/http 2022-12-15 12:38:09 +00:00
Nolan Woods
52443c2444 restic: refactor to use lib/http
Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2022-12-15 12:38:09 +00:00
Nick Craig-Wood
4444d2d102 serve webdav: refactor to use lib/http 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
08a1ca434b rcd: refactor rclone rc server to use lib/http 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
a9ce86f9a3 lib/http: add UsingAuth method 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
3167292c2f lib/http: remove unused Template from Config 2022-12-15 12:38:09 +00:00
Tom Mombourquette
ec7cc2b3c3 lib/http: Simplify server.go to export an http server rather than an interface
This also makes the implementation public.
2022-12-15 12:38:09 +00:00
Tom Mombourquette
2a2fcf1012 lib/http: rationalise names in test servers to be more consistent 2022-12-15 12:38:09 +00:00
Tom Mombourquette
6d62267227 serve http: support unix sockets and multiple listeners
- add support for unix sockets (which skip the auth).
- add support for multiple listeners
- collapse unnecessary internal structure of lib/http so it can all be
  imported together
- move files in sub directories of lib/http into the main lib/http
  directory and rework the code that uses them.

See: https://forum.rclone.org/t/wip-rc-rcd-over-unix-socket/33619
Fixes: #6605
2022-12-15 12:38:09 +00:00
Nick Craig-Wood
dfd8ad2fff Add compiletest target to compile all the tests only 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
43506f8086 test memory: read metadata if -M flag is specified 2022-12-15 12:37:19 +00:00
Nick Craig-Wood
ec3cee89d3 fstest: switch to port forwarding now Owncloud disallows wildcards
A recent security fix in the Owncloud container now causes it to
disallow wildcards in the OWNCLOUD_TRUSTED_DOMAINS setting.

This patch works around the problem by using port forwarding from the
host so we can keep the domain name constant.
2022-12-15 11:34:12 +00:00
Nick Craig-Wood
a171497a8b Add Jack to contributors 2022-12-15 11:34:12 +00:00
Jack
c6ad15e3b8 s3: make DigitalOcean name canonical 2022-12-14 16:35:05 +00:00
Jack
9a81885b51 s3: add DigitalOcean Spaces regions sfo3, fra1, syd1 2022-12-14 16:35:05 +00:00
Nick Craig-Wood
3d291da0f6 azureblob: fix directory marker detection after SDK upgrade
When the SDK was upgraded it started delivering metadata where the
keys were not in lower case as per the old SDK.

Rclone normalises the case of the keys for storage in the Object, but
the directory marker check was being done with the unnormalised keys
as it needs to be done before the Object is created.

This fixes the directory marker check to do a case insensitive compare
of the metadata keys.
2022-12-14 14:24:26 +00:00
Nick Craig-Wood
43bf177ff7 s3: fix excess memory usage when using versions
Before this change, we were taking the version ID straight from the
XML blob returned by the SDK and thus pinning the XML into memory
which bulked up the average memory per object from about 400 bytes to
4k.

Copying the string fixes the excess memory usage.
2022-12-14 14:24:26 +00:00
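One way to make such a copy in Go; a hedged illustration of the technique only, not the actual backend change.

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // A substring shares the backing array of the string it was sliced
        // from, so storing it in a long-lived object pins the whole XML blob.
        doc := strings.Repeat("x", 1<<20) + "<VersionId>abc123</VersionId>"
        versionID := doc[len(doc)-18 : len(doc)-12] // "abc123", still references doc

        // Cloning copies just the bytes we need, letting the blob be freed.
        // string([]byte(versionID)) achieves the same on Go versions before 1.18.
        versionID = strings.Clone(versionID)
        fmt.Println(versionID)
    }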
Nick Craig-Wood
c446651be8 Revert "s3: turn off list v2 support for Alibaba OSS since it does not work"
This reverts commit 4f386a1ccd.

It turns out that Alibaba OSS does support list v2 and the detection
code was wrong.

This means that users of the gov version of Alibaba will have to add
`list_version 1` to their config files.

See #6600
2022-12-14 14:24:26 +00:00
Nick Craig-Wood
6c407dbe15 s3: fix detection of listing routines which don't support v2 properly
In this commit

ab849b3613 s3: fix listing loop when using v2 listing on v1 server

The ContinuationToken was tested for existence, but it is the
NextContinuationToken that we are interested in.

See: #6600
2022-12-14 14:24:26 +00:00
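One plausible shape of that detection, using aws-sdk-go types (the field names are the SDK's; the exact rclone check and error text may differ): if the listing claims to be truncated but no NextContinuationToken comes back, a v2 listing can never make progress, so give up with a helpful message.

    package main

    import (
        "errors"
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    // checkV2Listing detects a server which accepts a v2 listing request but
    // behaves like a v1 server, which would otherwise cause a listing loop.
    func checkV2Listing(resp *s3.ListObjectsV2Output) error {
        if aws.BoolValue(resp.IsTruncated) && aws.StringValue(resp.NextContinuationToken) == "" {
            return errors.New("listing v2 not supported by this server - try setting list_version 1")
        }
        return nil
    }

    func main() {
        bad := &s3.ListObjectsV2Output{IsTruncated: aws.Bool(true)}
        fmt.Println(checkV2Listing(bad))
    }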
albertony
5a59b49b6b drive: handle shared drives with leading/trailing space in name (related to #6618) 2022-12-14 10:18:12 +01:00
albertony
8b9f3bbe29 fspath: improved detection of illegal remote names starting with dash (related to #4261) 2022-12-14 10:18:12 +01:00
albertony
8e6a469f98 fspath: allow unicode numbers and letters in remote names
Previously it was limited to plain ASCII (0-9, A-Z, a-z).

Implemented by adding \p{L}\p{N} alongside the \w in the regex;
even though these overlap, it means we can be sure it is 100%
backwards compatible.

Fixes #6618
2022-12-12 13:24:32 +00:00
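A simplified Go regexp in the spirit of the change; the real fspath rules also allow other characters (space, dash, dot, at, plus) with the extra leading/trailing restrictions covered by the neighbouring commits.

    package main

    import (
        "fmt"
        "regexp"
    )

    // \w plus unicode letters (\p{L}) and numbers (\p{N}); the classes overlap,
    // which guarantees backwards compatibility with the old ASCII-only rule.
    var remoteNameChars = regexp.MustCompile(`^[\w\p{L}\p{N}]+$`)

    func main() {
        for _, name := range []string{"mydrive", "диск", "日本語", "bad name!"} {
            fmt.Printf("%q %v\n", name, remoteNameChars.MatchString(name))
        }
    }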
albertony
f650a543ef docs: remote names may not start or end with space 2022-12-12 13:24:32 +00:00
albertony
683178a1f4 fspath: change remote name regex to not match when leading/trailing space 2022-12-12 13:24:32 +00:00
albertony
3937233e1e fspath: refactor away unnecessary constant for remote name regex 2022-12-12 13:24:32 +00:00
albertony
c571200812 fspath: remove unused capture group in remote name regex 2022-12-12 13:24:32 +00:00
albertony
04a663829b fspath: remove duplicate start-of-line anchor in remote name regex 2022-12-12 13:24:32 +00:00
albertony
6b4a2c1c4e fspath: remove superfluous underscore covered by existing word character class in remote name regex 2022-12-12 13:24:32 +00:00
albertony
f73be767a4 fspath: add unit tests for remote names with leading dash 2022-12-12 13:24:32 +00:00
albertony
4120dffcc1 fspath: add unit tests for remote names with space 2022-12-12 13:24:32 +00:00
Nick Craig-Wood
53ff5bb205 build: Update golang.org/x/net/http2 to fix GO-2022-1144
An attacker can cause excessive memory growth in a Go server accepting
HTTP/2 requests. HTTP/2 server connections contain a cache of HTTP
header keys sent by the client. While the total number of entries in
this cache is capped, an attacker sending very large keys can cause
the server to allocate approximately 64 MiB per open connection.
2022-12-12 12:49:12 +00:00
Nick Craig-Wood
397f428c48 Add vanplus to contributors 2022-12-12 12:49:12 +00:00
vanplus
c5a2c9b046 onedrive: document workaround for shared with me files 2022-12-12 12:04:28 +00:00
Kaloyan Raev
b98d7f6634 storj: implement server side Copy 2022-12-12 12:02:38 +00:00
Ole Frost
beea4d5119 lib/oauthutil: Improved usability of config flows needing web browser
The config question "Use auto config?" confused many users and led to
recurring forum posts from users who were unaware that they were using
a remote or headless machine.

This commit makes the question and possible options more descriptive
and precise.

This commit also adds references to the guide on remote setup in the
documentation of backends using oauth as primary authentication.
2022-12-09 14:41:05 +00:00
Eng Zer Jun
8e507075d1 test: replace defer cleanup with t.Cleanup
Reference: https://pkg.go.dev/testing#T.Cleanup
Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2022-12-09 14:38:05 +00:00
Nick Craig-Wood
be783a1856 dlna: properly attribute code used from https://github.com/anacrolix/dms
Fixes #4101
2022-12-09 14:27:10 +00:00
Nick Craig-Wood
450c366403 s3: fix nil pointer exception when using Versions
This was caused by

a9bd0c8de6 s3: reduce memory consumption for s3 objects

Which assumed that the StorageClass would always be set, but it isn't
set for Versions.
2022-12-09 12:23:51 +00:00
Matthew Vernon
1dbdc48a77 WASM: comply with wasm_exec.js licence terms
The BSD-style license that Go uses requires the license to be included
with the source distribution; so add it as LICENSE.wasmexec (to avoid
confusion with the other licenses in rclone) and note the location of
the license in wasm_exec.js itself.
2022-12-07 15:25:46 +00:00
Nick Craig-Wood
d7cb17848d azureblob: revamp authentication to include all methods and docs
This updates the authentication to include

- Auth from the environment
    1. Environment Variables
    2. Managed Service Identity Credentials
    3. Azure CLI credentials (as used by the az tool)
- Account and Shared Key
- SAS URL
- Service principal with client secret
- Service principal with certificate
- User with username and password
- Managed Service Identity Credentials

And rationalises the auth order.
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
f3c8b7a948 azureblob: add --azureblob-no-check-container to assume container exists
Normally rclone will check the container exists before uploading if it
hasn't listed the container yet.

Often rclone will be running with a limited set of permissions which
means rclone can't create the container anyway, so this stops the
check.

This will save a transaction.
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
914fbe242c azureblob: ignore AuthorizationFailure when trying to create a container
If we get AuthorizationFailure when trying to create a container, then
assume the container has already been created.
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
f746b2fe85 azureblob: port old authentication methods to new SDK
Co-authored-by: Brad Ackerman <brad@facefault.org>
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
a131da2c35 azureblob: Port to new SDK
This commit switches from using the old Azure go modules

    github.com/Azure/azure-pipeline-go/pipeline
    github.com/Azure/azure-storage-blob-go/azblob
    github.com/Azure/go-autorest/autorest/adal

To the new SDK

    github.com/Azure/azure-sdk-for-go/

This stops rclone using deprecated code and enables the full range of
authentication with Azure.

See #6132 and #5284
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
60e4cb6f6f Add MohammadReza to contributors 2022-12-06 15:06:51 +00:00
MohammadReza
0a8b1fe5de s3: add Liara LOS to provider list 2022-12-06 12:25:23 +00:00
asdffdsazqqq
b24c83db21 restic: fix typo in docs 'remove' should be 'remote' 2022-12-06 12:14:25 +00:00
Nick Craig-Wood
4f386a1ccd s3: turn off list v2 support for Alibaba OSS since it does not work
See: #6600
2022-12-06 12:11:21 +00:00
Nick Craig-Wood
ab849b3613 s3: fix listing loop when using v2 listing on v1 server
Before this change, rclone would enter a listing loop if it used v2
listing on a v1 server and the list exceeded 1000 items.

This change detects the problem and gives the user a helpful message.

Fixes #6600
2022-12-06 12:11:21 +00:00
Nick Craig-Wood
10aee3926a Add Kevin Verstaen to contributors 2022-12-06 12:11:21 +00:00
Nick Craig-Wood
4583b61e3d Add Erik Agterdenbos to contributors 2022-12-06 12:11:06 +00:00
Nick Craig-Wood
483e9e1ee3 Add ycdtosa to contributors 2022-12-06 12:11:06 +00:00
Kevin Verstaen
c2dfc3e5b3 fs: Add global flag '--color' to control terminal colors
* fs: add TerminalColorMode type
* fs: add new config(flags) for TerminalColorMode
* lib/terminal: use TerminalColorMode to determine how to handle colors
* Add documentation for '--terminal-color-mode'
* tree: remove obsolete --color replaced by global --color

This changes the default behaviour of tree. It now displays colors by
default instead of only displaying them when the flag -C/--color was
active. Old behaviour (no color) can be achieved by setting --color to
'never'.

Fixes: #6604
2022-12-06 12:07:06 +00:00
Erik Agterdenbos
a9bd0c8de6 s3: reduce memory consumption for s3 objects
Copying the storageClass string instead of using a pointer to the original string.
This prevents the Go garbage collector from keeping large numbers of
XMLNode structs and references in memory, created by xmlutil.XMLToStruct()
from the aws-sdk-go.
2022-12-05 23:07:08 +00:00
Anthony Pessy
1628ca0d46 ftp: Improve performance to speed up --files-from and NewObject
This commit uses the MLST command (where available) to get the status
for single files rather than listing the parent directory and looking
for the file. This makes actions such as using `--files-from` much quicker.

* use getEntry to look up remote files when supported
* findItem now expects the full path directly

This makes the expected argument similar to the getInfo method, the
difference now being that one returns a FileInfo whereas the other
returns an ftp Entry.

Fixes #6225

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2022-12-05 16:19:04 +00:00
albertony
313493d51b docs: remove minimum versions from command pages of pre v1 commands 2022-12-03 18:58:55 +01:00
albertony
6d18f60725 docs: add minimum versions to the command pages 2022-12-03 18:58:55 +01:00
albertony
d74662a751 docs: add badge showing version introduced and experimental/beta/deprecated status to command doc pages 2022-12-03 18:58:55 +01:00
albertony
d05fd2a14f docs: add badge for experimental/beta/deprecated status next to version in backend docs 2022-12-03 18:58:55 +01:00
albertony
097be753ab docs: minor cleanup of headers in backend docs 2022-12-03 18:58:55 +01:00
ycdtosa
50c9678cea ftp: update help text of implicit/explicit TLS options to refer to FTPS instead of FTP 2022-11-29 14:58:46 +01:00
eNV25
7672cde4f3 cmd/ncdu: use negative values for key runes
The previous version used values after the maximum Unicode code-point
to encode a key. This could lead to an overflow since a key is an int16,
a rune is an int32, and the maximum Unicode code-point does not fit in
an int16.

A better solution is to simply use negative runes for keys.
2022-11-28 10:51:11 +00:00
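A minimal sketch of the encoding idea (the key value and helpers are illustrative, not ncdu's actual code): negative runes can never collide with real characters, and they round-trip through int16 safely.

    package main

    import "fmt"

    // tcell keys are an int16; runes are int32. Map each key to a negative
    // rune so it cannot clash with any Unicode code point.
    func keyToRune(k int16) rune { return -rune(k) - 1 }

    func isKey(r rune) bool { return r < 0 }

    func runeToKey(r rune) int16 { return int16(-(r + 1)) }

    func main() {
        const someKey int16 = 257 // illustrative value, not a real tcell constant
        r := keyToRune(someKey)
        fmt.Println(r, isKey(r), runeToKey(r) == someKey) // -258 true true
    }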
eNV25
a4c65532ea cmd/ncdu: use tcell directly instead of the termbox wrapper
Following up on 36add0af, which switched from termbox
to tcell's termbox wrapper.
2022-11-25 14:42:19 +00:00
Nick Craig-Wood
46b080c092 vfs: Fix IO Error opening a file with O_CREATE|O_RDONLY in --vfs-cache-mode not full
Before this fix, opening a file with `O_CREATE|O_RDONLY` caused an IO error to
be returned when using `--vfs-cache-mode off` or `--vfs-cache-mode writes`.

This was because the file was opened with read intent, but the `O_CREATE`
implies write intent to create the file even though the file is opened
`O_RDONLY`.

This fix sets write intent for the file if `O_CREATE` is passed in which fixes
the problem for all the VFS cache modes.

It also extends the exhaustive open flags testing to `--vfs-cache-mode writes`
as well as `--vfs-cache-mode full` which would have caught this problem.

See: https://forum.rclone.org/t/i-o-error-trashing-file-on-sftp-mount/34317/
2022-11-24 17:04:36 +00:00
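A sketch of the flag check described above, as a simplified stand-in for the VFS logic rather than the actual code.

    package main

    import (
        "fmt"
        "os"
    )

    // needsWriteIntent reports whether an open call may modify the file, even
    // when the access mode is read-only: O_CREATE implies the file may have to
    // be created, so the handle needs write intent.
    func needsWriteIntent(flags int) bool {
        if flags&(os.O_CREATE|os.O_TRUNC|os.O_APPEND) != 0 {
            return true
        }
        switch flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) {
        case os.O_WRONLY, os.O_RDWR:
            return true
        }
        return false
    }

    func main() {
        fmt.Println(needsWriteIntent(os.O_CREATE | os.O_RDONLY)) // true
        fmt.Println(needsWriteIntent(os.O_RDONLY))               // false
    }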
Nick Craig-Wood
0edf6478e3 Add Nathaniel Wesley Filardo to contributors 2022-11-24 17:04:36 +00:00
Nathaniel Wesley Filardo
f7cdf318db azureblob: support simple "environment credentials"
As per
https://learn.microsoft.com/en-us/dotnet/api/azure.identity.environmentcredential?view=azure-dotnet

This supports only AZURE_CLIENT_SECRET-based authentication, as with the
existing service principal support.

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2022-11-24 12:06:14 +00:00
Nathaniel Wesley Filardo
6f3682c12f azureblob: make newServicePrincipalTokenRefresher take parsed principal structure 2022-11-24 12:06:14 +00:00
Nick Craig-Wood
e3d593d40c build: update dependencies 2022-11-24 11:05:54 +00:00
Nick Craig-Wood
83551bb02e cmount: update cgofuse for FUSE-T support for mounting volumes on Mac
See: https://forum.rclone.org/t/fr-fuse-t-support-for-mounting-volumes-on-mac/33110/
2022-11-24 10:51:16 +00:00
Nick Craig-Wood
430bf0d5eb crypt: fix compress wrapping crypt giving upload errors
Before this fix a chain compress -> crypt -> s3 was giving errors

    BadDigest: The Content-MD5 you specified did not match what we received.

This was because the crypt backend was encrypting the underlying local
object to calculate the hash rather than the contents of the metadata
stream.

It did this because the crypt backend incorrectly identified the
object as a local object.

This fixes the problem by making sure the crypt backend does not
unwrap anything but fs.OverrideRemote objects.

See: https://forum.rclone.org/t/not-encrypting-or-compressing-before-upload/32261/10
2022-11-21 08:02:09 +00:00
Nick Craig-Wood
dd71f5d968 fs: move operations.NewOverrideRemote to fs.NewOverrideRemote 2022-11-21 08:02:09 +00:00
albertony
7db1c506f2 smb: fix issue where spurious dot directory is created 2022-11-20 17:12:02 +00:00
Nick Craig-Wood
959cd938bc docs: Add minimum versions to all the backend pages and some of the other pages 2022-11-18 14:41:24 +00:00
Nick Craig-Wood
03b07c280c Changelog updates from Version v1.60.1 2022-11-17 16:32:25 +00:00
Nick Craig-Wood
705e8f2fe0 smb: fix Failed to sync: context canceled at the end of syncs
Before this change we were putting connections into the connection
pool which had a local context in.

This meant that when the operation had finished the context was
cancelled and the connection became unusable.

See: https://forum.rclone.org/t/failed-to-sync-context-canceled/34017/
2022-11-16 10:55:25 +00:00
Nick Craig-Wood
591fc3609a vfs: fix deadlock caused by cache cleaner and upload finishing
Before this patch a deadlock could occur if the cache cleaner was
running when an object upload finished.

This fixes the problem by delaying marking the object as clean until
we have notified the VFS layer. This means that the cache cleaner
won't consider the object until **after** the VFS layer has been
notified, thus avoiding the deadlock.

See: https://forum.rclone.org/t/rclone-mount-deadlock-when-dir-cache-time-strikes/33486/
2022-11-15 18:01:36 +00:00
Nick Craig-Wood
b4a3d1b9ed Add asdffdsazqqq to contributors 2022-11-15 18:01:36 +00:00
asdffdsazqqq
84219b95ab docs: faq: how to use a proxy server that requires a username and password - fixes #6565 2022-11-15 17:58:43 +00:00
Nick Craig-Wood
2c78f56d48 webdav: fix Move/Copy/DirMove when using -server-side-across-configs
Before this change, when using -server-side-across-configs rclone
would direct Move/Copy/DirMove to the destination server.

However this should be directed to the source server. This is a little
unclear in the RFC, but the name of the parameter "Destination:" seems
clear and this is how dCache and Rucio have implemented it.

See: https://forum.rclone.org/t/webdav-copy-request-implemented-incorrectly/34072/
2022-11-15 09:51:30 +00:00
Nick Craig-Wood
a61d219bcd local: fix -L/--copy-links with filters missing directories
In this commit

8d1fff9a82 local: obey file filters in listing to fix errors on excluded files

We introduced the concept of local backend filters.

Unfortunately the filters were being applied before we had resolved
the symlink to point to a directory. This meant that symlinks pointing
to directories were filtered out when they shouldn't have been.

This was fixed by moving the filter check until after the symlink had
been resolved.

See: https://forum.rclone.org/t/copy-links-not-following-symlinks-on-1-60-0/34073/7
2022-11-14 18:03:40 +00:00
Nick Craig-Wood
652d3cdee4 vfs: windows: fix slow opening of exe files by not truncating files when not necessary
Before this change we truncated files in the backing store regardless
of whether we needed to or not.

After, we check to see if the file is the right size and don't
truncate if it is.

Apparently Windows Defender likes to check executables each time they
are modified, and truncating a file to its existing size is enough to
trigger the Windows Defender scan. This was causing a big slowdown for
operations which opened and closed the file a lot, such as looking at
properties on an executable.

See: https://forum.rclone.org/t/for-mount-sftp-why-right-click-on-exe-file-is-so-slow-until-it-freezes/33830
2022-11-14 17:05:51 +00:00
Nick Craig-Wood
bb1fc5b86d Add Kamui to contributors 2022-11-14 17:05:51 +00:00
Kamui
efd3c6449b rcserver: avoid generating default credentials with htpasswd - fixes #4839 2022-11-14 15:26:44 +00:00
Nick Craig-Wood
0ac5795f8c fs: make all duration flags take y, M, w, d etc suffixes
Fixes #6556
2022-11-14 15:13:49 +00:00
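As a sketch of the new suffixes (remote names are placeholders; --min-age and --max-age are existing duration flags):

    rclone sync remote:src remote:dst --min-age 1M --max-age 1y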
Nick Craig-Wood
2f77651f64 Add rkettelerij to contributors 2022-11-14 15:13:49 +00:00
Nick Craig-Wood
8daacc2b99 Add techknowlogick to contributors 2022-11-14 15:13:49 +00:00
rkettelerij
87fa9f8e46 azureblob: Add support for custom upload headers 2022-11-14 15:12:28 +00:00
albertony
1392793334 sftp: auto-detect shell type for fish
Fish is different from POSIX-based Unix shells such as bash,
and a bracketed variable reference like the one we use for the
auto-detection echo command is not supported.
will return with zero exit code but produce no output on
stdout. There is a message on stderr, but we don't log it
due to the zero exit code:

fish: Variables cannot be bracketed. In fish, please use {$ShellId}.

Fixes #6552
2022-11-11 15:32:44 +00:00
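Illustration only (the exact detection command rclone sends is simplified here); a POSIX-style bracketed reference behaves differently under fish:

    # POSIX shells (bash, sh) expand the bracketed reference and print it on stdout
    echo ${ShellId}
    # fish: exit code 0, nothing on stdout, "Variables cannot be bracketed" on stderr,
    # so the auto-detection sees an empty response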
techknowlogick
0e427216db s3: Add additional Wasabi locations 2022-11-11 14:39:12 +00:00
Anagh Kumar Baranwal
0c56c46523 rc: Add commands to set GC Percent & Memory Limit (1.19+)
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2022-11-10 12:07:18 +00:00
Nick Craig-Wood
617c5d5e1b rcat: preserve metadata when Copy falls back to Rcat
Before this change if we copied files of unknown size, then they lost
their metadata.

This was particularly noticeable using --s3-decompress.

This change adds metadata to Rcat and RcatSized and changes Copy to
pass the metadata in when it calls Rcat for an unknown sized input.

Fixes #6546
2022-11-10 12:04:35 +00:00
Nick Craig-Wood
ec2024b907 fstest: use WithMetadata / WithMimeType 2022-11-10 12:04:35 +00:00
Nick Craig-Wood
458845ce89 fs/object: add WithMetadata and WithMimetype to static and memory objects 2022-11-10 12:04:35 +00:00
Nick Craig-Wood
57bde20acd Add Aaron Gokaslan to contributors 2022-11-10 12:04:35 +00:00
Aaron Gokaslan
b0248e8070 s3: fix for unchecked err value in s3 listv2 2022-11-10 11:52:59 +00:00
Nick Craig-Wood
b285efb476 mailru: allow timestamps to be before the epoch 1970-01-01
Fixes #6547
2022-11-10 11:27:01 +00:00
Nick Craig-Wood
be6f29930b dedupe: make dedupe obey the filters
See: https://forum.rclone.org/t/dial-tcp-lookup-api-pcloud-com-no-such-host/33910/
2022-11-10 09:56:02 +00:00
Nick Craig-Wood
653bc23728 dedupe: count Checks in the stats while scanning for duplicates
This allows the user to see rclone has not hung.

See: https://forum.rclone.org/t/dial-tcp-lookup-api-pcloud-com-no-such-host/33910/
2022-11-10 09:56:02 +00:00
Nick Craig-Wood
47b04580db accounting: make it so we can account directories as well as files 2022-11-10 09:56:02 +00:00
Nick Craig-Wood
919e28b8bf lib/cache: fix alias backend shutting down too soon
Before this patch, when an alias backend was created it would be
renamed to be canonical and in the process Shutdown would be called on
it. This was particularly noticeable with the dropbox backend which
gave this error when uploading files after the backend was Shutdown.

    Failed to copy: upload failed: batcher is shutting down

This patch fixes the cache Rename code not to finalize objects if the
object that is being overwritten is the same as the existing object.

See: https://forum.rclone.org/t/upload-failed-batcher-is-shutting-down/33900
2022-11-09 16:29:23 +00:00
Nick Craig-Wood
3a3bc5a1ae mailru: note that an app password is now needed - fixes #6398 2022-11-08 20:33:11 +00:00
Nick Craig-Wood
133c006c37 Add Roel Arents to contributors 2022-11-08 20:33:11 +00:00
Roel Arents
e455940f71 azureblob: allow emulator account/key override 2022-11-08 20:24:06 +00:00
Nick Craig-Wood
65528fd009 docs: remove link to rclone slack as it is no longer supported 2022-11-08 16:11:34 +00:00
Nick Craig-Wood
691159fe94 s3: allow Storj to server side copy since it seems to work now - fixes #6550 2022-11-08 16:05:24 +00:00
Nick Craig-Wood
09858c0c5a Add Arnie97 to contributors 2022-11-08 16:05:24 +00:00
Nick Craig-Wood
5fd0abb2b9 Add x3-apptech to contributors 2022-11-08 16:05:24 +00:00
Arnie97
36c37ffec1 backend/http: rename stat to decodeMetadata 2022-11-08 13:04:17 +00:00
Arnie97
6a5b7664f7 backend/http: support content-range response header 2022-11-08 13:04:17 +00:00
Arnie97
ebac854512 backend/http: do not update object size based on range requests 2022-11-08 13:04:17 +00:00
Arnie97
cafce96185 backend/http: parse get responses when no_head is set 2022-11-08 13:04:17 +00:00
João Henrique Franco
92ffcf9f86 wasm: fix walltime link error by adding up-to-date wasm_exec.js
Solves link error while running rclone's wasm version. Go's `walltime1` function was renamed to `walltime`. This commit updates wasm_exec.js with the new name.
2022-11-07 12:13:23 +00:00
albertony
64cdbb67b5 ncdu: add support for modification time 2022-11-07 11:57:44 +00:00
albertony
528fc899fb ncdu: fallback to sort by name also for sort by average size 2022-11-07 11:57:44 +00:00
x3-apptech
d452f502c3 cmd: Enable SIGINFO (Ctrl-T) handler on FreeBSD, NetBSD, OpenBSD and Dragonfly BSD 2022-11-07 11:45:04 +00:00
albertony
5d6b8141ec Replace deprecated ioutil
As of Go 1.16, the same functionality is now provided by package io or
package os, and those implementations should be preferred in new code.
2022-11-07 11:41:47 +00:00
albertony
776e5ea83a docs: fix character that was incorrectly interpreted as markdown 2022-11-07 08:59:40 +01:00
albertony
c9acc06a49 Add Clément Notin to contributors 2022-11-07 08:51:49 +01:00
Clément Notin
a2dca02594 docs: fix character that was incorrectly interpreted as markdown 2022-11-07 08:50:21 +01:00
Joda Stößer
210331bf61 docs: fix typo remove in rclone_serve_restic command 2022-11-07 08:46:05 +01:00
Nick Craig-Wood
5b5fdc6bc5 s3: add provider quirk --s3-might-gzip to fix corrupted on transfer: sizes differ
Before this change, some files were giving this error when downloaded
from Cloudflare and other providers.

    ERROR corrupted on transfer: sizes differ NNN vs MMM

This is because these providers automatically gzip the object when rclone
wasn't expecting them to. (AWS does not gzip objects unless they were
uploaded gzipped.)

This patch adds a quirk to fix the problem and a flag to control
it. The quirk `might_gzip` is set to `true` for all providers except
AWS.

See: https://forum.rclone.org/t/s3-error-corrupted-on-transfer-sizes-differ-nnn-vs-mmm/33694/
Fixes: #6533
2022-11-04 16:53:32 +00:00
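A usage sketch (remote name is a placeholder); the quirk can also be forced on or off per run:

    rclone copy remote:bucket/path /tmp/dest --s3-might-gzip=true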
Nick Craig-Wood
0de74864b6 Add dgouju to contributors 2022-11-04 16:53:32 +00:00
dgouju
7042a11875 sftp: add configuration options to set ssh Ciphers / MACs / KeyExchange 2022-11-03 17:11:28 +00:00
Nick Craig-Wood
028832ce73 s3: if bucket or object ACL is empty string then don't add X-Amz-Acl: header - fixes #5730
Before this fix it was impossible to stop rclone generating an
X-Amz-Acl: header, which is incompatible with GCS buckets using uniform
access control and is generally deprecated at AWS.
2022-11-03 17:06:24 +00:00
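A sketch of how to take advantage of this (remote name is a placeholder); the same effect can be had by leaving acl empty in the remote's config:

    rclone copy /tmp/src remote:bucket --s3-acl ""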
Philip Harvey
c7c9356af5 s3: stop setting object and bucket ACL to "private" if it is an empty string #5730 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
3292c112c5 Add Philip Harvey to contributors 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
126d71b332 Add Anthony Pessy to contributors 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
df9be72a82 Add coultonluke to contributors 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
6aa8f7409a Add Samuel Johnson to contributors 2022-11-03 17:06:24 +00:00
Anthony Pessy
10c884552c s3: use different strategy to resolve s3 region
The API endpoint GetBucketLocation requires
top level permission.

If we do an authenticated head request to a bucket, the bucket location will be returned in the HTTP headers.

Fixes #5066
2022-11-02 11:48:08 +00:00
albertony
2617610741 docs: add direct download link for windows arm64 2022-10-31 21:14:10 +01:00
coultonluke
53dd174f3d docs: corrected download links in windows install docs 2022-10-31 21:09:53 +01:00
albertony
65987f5970 lib/file: improve error message for create dir on non-existent network host on windows (#6420) 2022-10-28 21:00:22 +02:00
Manoj Ghosh
1fc864fb32 oracle-object-storage: doc fix
See #6521
2022-10-28 20:32:17 +02:00
albertony
22abcc9fd2 build: update golang.org/x/net dependency
This fixes vulnerability GO-2022-0969 reported by govulncheck:

HTTP/2 server connections can hang forever waiting for a clean
shutdown that was preempted by a fatal error. This condition can
be exploited by a malicious client to cause a denial of service.

Call stacks in your code:
Error: cmd/serve/restic/restic.go:150:22: github.com/rclone/rclone/cmd/serve/restic.init$1$1 calls golang.org/x/net/http2.Server.ServeConn

Found in: golang.org/x/net/http2@v0.0.0-20220805013720-a33c5aa5df48
Fixed in: golang.org/x/net/http2@v0.0.0-20220906165146-f3363e06e74c
More info: https://pkg.go.dev/vuln/GO-2022-0969
2022-10-26 12:59:31 +02:00
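For reference, the report above can be reproduced locally with the standard govulncheck commands (illustrative only):

    go install golang.org/x/vuln/cmd/govulncheck@latest
    govulncheck ./...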
albertony
178cf821de build: add vulnerability testing using govulncheck 2022-10-26 12:59:31 +02:00
albertony
f4a571786c local: clean absolute paths - fixes #6493 2022-10-25 21:09:56 +02:00
albertony
c0a8ffcbef build: setup-go v3 improved semver notation 2022-10-25 20:25:39 +02:00
albertony
76eeca9eae build: setup-go v3 dropped the stable input 2022-10-25 20:25:39 +02:00
Samuel Johnson
8114744bce docs: Update faq.md with bisync
Updated FAQ to clarify that experimental bi-sync is now available.
2022-10-23 11:15:09 +01:00
Nick Craig-Wood
db5d582404 Start v1.61.0-DEV development 2022-10-21 16:15:53 +01:00
Nick Craig-Wood
01dbbff62e Version v1.60.0 2022-10-21 15:06:08 +01:00
Nick Craig-Wood
afa61e702c docs: remove hosted by tag as server has moved 2022-10-21 12:49:10 +01:00
Nick Craig-Wood
546dc82793 Add Robert Newson to contributors 2022-10-21 12:49:10 +01:00
Nick Craig-Wood
d9c4d95ab3 Add Tom Mombourquette to contributors 2022-10-21 12:49:10 +01:00
Nick Craig-Wood
0fb1b75a02 Add Manoj Ghosh to contributors 2022-10-21 12:49:10 +01:00
Tom Mombourquette
38f1f5b177 rc: Fix mount/listmounts not returning the full Fs entered in mount/mount 2022-10-21 12:48:27 +01:00
Ole Frost
0d2a62a927 docs: Describe connection strings in alias backend 2022-10-21 12:47:51 +01:00
Manoj Ghosh
b75c207208 oracle-object-storage: overview, docs update 2022-10-21 12:47:03 +01:00
Ole Frost
dff223f195 install.sh: fix arm-v7 download 2022-10-21 12:35:58 +01:00
Robert Newson
d2fef05fe4 httplib: Add --xxx-min-tls-version option to select minimum tls values for HTTP servers
This allows administrators to disable TLS 1.0 and 1.1, for example.

Example:

rclone rcd --rc-min-tls-version=tls1.2 --rc-cert <cert> --rc-key <key>
2022-10-19 17:13:12 +01:00
Tom Mombourquette
188b9f8cf1 rc: corrected mount/unmountall help msg and title 2022-10-17 17:34:48 +01:00
Manoj Ghosh
daf3162bcf oracle-object-storage: minor docs update 2022-10-17 17:08:44 +01:00
Nick Craig-Wood
5e59e7f442 ftp: Fix hang when using ExplicitTLS to certain servers.
It was discovered that doing the tls Handshake immediately on
connection causes some FTP servers (proftpd and pureftpd) to hang.

This imports a fix for it by temporarily hard forking jlaffaye/ftp to
include the fix submitted as a pull request.

See: https://forum.rclone.org/t/rclone-ftps-explicit-rclone-touch-empty-files-proftpd-unable-to-build-data-connection-operation-not-permitted/22522
See: https://github.com/rclone/rclone/issues/6426#issuecomment-1243993039
See: https://github.com/jlaffaye/ftp/pull/283
See: https://github.com/jlaffaye/ftp/issues/282
2022-10-14 12:10:03 +01:00
Nick Craig-Wood
fce22c0065 s3: add --s3-no-system-metadata to suppress read and write of system metadata
See: https://forum.rclone.org/t/problems-with-content-disposition-and-backblaze-b2-using-s3/33292/
2022-10-14 11:12:04 +01:00
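A usage sketch (remote name is a placeholder):

    rclone copy /tmp/src remote:bucket --s3-no-system-metadata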
Nick Craig-Wood
bb3272e837 Add Bachue Zhou to contributors 2022-10-14 11:11:56 +01:00
Nick Craig-Wood
cb5b5635c7 Add Manoj Ghosh to contributors 2022-10-14 11:11:56 +01:00
Bachue Zhou
66ed0ca726 s3: add Qiniu KODO to s3 provider list - fixes #6195 2022-10-13 15:49:22 +01:00
Manoj Ghosh
b16e50851a Add a native backend for oracle object storage - fixes #6299 2022-10-13 13:04:56 +01:00
Nick Craig-Wood
90d23139f6 s3: drop binary metadata with an ERROR message
Before this change, rclone would attempt to upload metadata with
binary contents which fail to be uploaded by net/http.

This checks the keys and values for validity as http header values
before uploading.

See: https://forum.rclone.org/t/invalid-metadata-key-names-result-in-a-failure-to-transfer-xattr-results-in-failure-to-upload-net-http-invalid-header-field-value-for-x-amz-meta-samba-pai/33406/
2022-10-13 12:00:45 +01:00
Nick Craig-Wood
5ea9398b63 swift: add --swift-no-large-objects to reduce HEAD requests
Supplying the flag --swift-no-large-objects is a promise to the swift
backend that there are no dynamic or static large objects stored.

Using that knowledge rclone can speed its operations up reducing the
number of HEAD requests.

See: https://forum.rclone.org/t/handling-or-not-of-large-objects-in-swift/33389/
See: https://forum.rclone.org/t/swift-sync-checksum-calls-head-on-every-object-so-is-very-slow/30322
2022-10-13 11:58:19 +01:00
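A usage sketch (remote and container names are placeholders); only set this if no static or dynamic large objects are stored:

    rclone sync remote:container /tmp/mirror --swift-no-large-objects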
Isaac Aymerich
3f804224f4 rc: validate Daemon option is not set when mounting a volume via RC - fixes #6469 2022-10-12 12:07:48 +01:00
Nick Craig-Wood
cf0bf159ab s3: try to keep the maximum precision in ModTime with --use-server-modtime
Before this change if --use-server-modtime was in use the ModTime
could change for an object as we receive it accurate to the nearest ms
in listings, but only accurate to the nearest second in HEAD and GET
requests.

Normally AWS returns the milliseconds as .000 in listings, but if
versions are in use it may not. Storj S3 also seems to return
milliseconds.

This patch tries to keep the maximum precision in the last modified
time, so it doesn't update a last modified time with a truncated
version if the times were the same to the nearest second.

See: https://forum.rclone.org/t/cache-fingerprint-miss-behavior-leading-to-false-positive-stalen-cache/33404/
2022-10-12 09:18:10 +01:00
Lesmiscore
6654b66114 union: propagate SlowHash feature 2022-10-10 07:58:01 +01:00
Nick Craig-Wood
9bf78d0373 local: fix "Failed to read metadata: function not implemented" on old Linux kernels
Before this change rclone used statx() to read the metadata for files
from the local filesystem when `-M` was in use.

Unfortunately statx() was only introduced in kernel 4.11 which was
released in April 2017 so there are current systems (eg Centos 7)
still on kernel versions which don't support statx().

This patch checks to see if statx() is available and if it isn't, it
falls back to using fstatat(), which was introduced in Linux 2.6.16
and so is guaranteed to be present on any kernel Go supports.

See: https://forum.rclone.org/t/metadata-from-linux-local-s3-failed-to-copy-failed-to-read-metadata-from-source-object-function-not-implemented/33233/
2022-10-07 14:14:16 +01:00
Nick Craig-Wood
0c1fb8b2b7 Add YanceyChiew to contributors 2022-10-07 14:14:08 +01:00
YanceyChiew
966654e23a dlna: run assets_generate to make new icons 2022-10-06 16:59:51 +01:00
YanceyChiew
13b65104eb dlna: add SSDP AnnounceInterval flag option
The current default AnnounceInterval is too short, causing the
multicast domain to be flooded with NOTIFY announcements,
which may prevent other dlna devices from sleeping.

This change allows users to set the announcement interval,
and its default value is also increased to 12 minutes.

Even within the interval, rclone can still passively respond to
M-SEARCH requests from other devices.
2022-10-06 16:59:51 +01:00
YanceyChiew
4a35aff33c dlna: add verification of addresses
Verify the http service listening address and the SSDP server
announcement address to prevent accidental listening of IPv6 addresses
that do not support dlna yet and may be globally accessible.

Unlistened addresses on the interface will also be filtered out of the
SSDP announcement to avoid misleading other services in the multicast domain.
2022-10-06 16:59:51 +01:00
YanceyChiew
09b6d939f5 dlna: add support for more external subtitle 2022-10-06 16:59:51 +01:00
Nick Craig-Wood
4e79de106a hubic: remove backend as service has now shutdown - fixes #6481 2022-10-05 13:33:37 +01:00
Nick Craig-Wood
b437d9461a Add Isaac Aymerich to contributors 2022-10-05 13:33:29 +01:00
Nick Craig-Wood
910af597a1 Add Lorenzo Milesi to contributors 2022-10-05 13:33:29 +01:00
Nick Craig-Wood
c10965ecfb Add Dimitri Papadopoulos Orfanos to contributors 2022-10-05 13:33:29 +01:00
albertony
5efb880772 Remove LICENSE 2022-10-04 15:40:37 +02:00
albertony
6c3b7d5820 Create LICENSE 2022-10-04 15:38:58 +02:00
Isaac Aymerich
c5109408c0 rc: handle external unmount when mounting
Before this change, if a mount was created via the rc but unmounted
externally with `fusermount -u` say, rclone would still believe the mount
was active when it wasn't.
2022-10-03 11:24:58 +01:00
Marco Molteni
a3c06b9bbe docs/content: remove duplicate Scaleway C14 Glacier
Scaleway S3/C14 is now called S3/Glacier. Since Glacier is already
mentioned in the Rclone Scaleway section, let's just remove this
entry from here.
2022-10-02 21:58:16 +01:00
Lesmiscore
2aa264b33c smb: backend to support SMB - fixes #2042 2022-09-30 16:10:57 +01:00
albertony
4e078765f9 docs: improve description of make command in install docs 2022-09-28 16:14:12 +02:00
albertony
7fbc928a19 docs: remove "After" in systemd mount example
See #6459
2022-09-26 19:14:10 +02:00
Lorenzo Milesi
27096323db docs: remove "After" in automount example
According to [systemd.automount](https://www.freedesktop.org/software/systemd/man/systemd.automount.html) manual

> Note that automount units are separate from the mount itself, so you should 
> not set After= or Requires= for mount dependencies here. 
> For example, you should not set After=network-online.target or 
> similar on network filesystems. Doing so may result in an ordering cycle.
2022-09-26 19:11:29 +02:00
Dimitri Papadopoulos Orfanos
7e547822d6 build: update GitHub actions to latest versions 2022-09-19 19:51:07 +01:00
Nick Craig-Wood
67625b1dbd ftp: increase timeouts on tests as they were failing locally 2022-09-19 19:45:52 +01:00
Nick Craig-Wood
88086643f7 ftp: adapt to library changes to fix connection errors #6426
In https://github.com/jlaffaye/ftp/commit/212daf295f the upstream FTP
library changed the way adding your own dialer works which meant that
connections when using explicit FTP were failing.

This patch reworks our connection code to bring it into the
expectations of the library.
2022-09-18 11:31:11 +01:00
Nick Craig-Wood
5f13d84135 compress: add extra debugging in case we have a repeat of #6434 2022-09-18 11:31:11 +01:00
Nick Craig-Wood
07efdb55fa compress: fix error handling to not use or return nil objects #6434 2022-09-18 11:31:11 +01:00
Nick Craig-Wood
fb6ddd680c compress: fix crash due to nil metadata #6434
Before this fix, if an error occurred reading the metadata, it could be
set as nil and then used, causing a crash.

This fix changes the readMetadata function so it returns an error, and
the error is always set if the metadata returned is nil.
2022-09-18 11:31:11 +01:00
Nick Craig-Wood
bc09105d2e Add Richard Bateman to contributors 2022-09-18 11:31:11 +01:00
Richard Bateman
4f374bc264 s3: add --s3-sse-customer-key-base64 to supply keys with binary data
Fixes #6400
2022-09-17 17:28:44 +01:00
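A usage sketch with placeholder values; pairing the new flag with --s3-sse-customer-algorithm is assumed here, as with the existing SSE-C key flags:

    rclone copy /tmp/src remote:bucket --s3-sse-customer-algorithm AES256 --s3-sse-customer-key-base64 <base64-encoded-key>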
Nick Craig-Wood
1c99661d8c onedrive: disable change notify in China region since it is not supported
Fixes #6444
2022-09-16 16:57:29 +01:00
Nick Craig-Wood
04b54bbb1e Add Alexander Knorr to contributors 2022-09-16 16:57:23 +01:00
Nick Craig-Wood
90cda2d6c2 Add Dmitry Deniskin to contributors 2022-09-16 16:57:23 +01:00
Nick Craig-Wood
dbd9ce78e6 Add Øyvind Heddeland Instefjord to contributors 2022-09-16 16:57:23 +01:00
Nick Craig-Wood
cbc18e2693 docs: update install docs to make more consistent
This also adds repology badges where appropriate to show versions in
external repositories.
2022-09-16 16:56:00 +01:00
Alexander Knorr
67c675d7ad docs: add chocolatey package manager to install instructions 2022-09-15 16:12:39 +01:00
Dmitry Deniskin
c080b39e47 s3: add support for IONOS Cloud Storage 2022-09-15 16:04:34 +01:00
Nick Craig-Wood
8504da496b Changelog updates from Version v1.59.2 2022-09-15 11:57:07 +01:00
Lesmiscore
67240bd541 sftp: fix directory creation races
Before this change, if mkdir failed it would have thrown an
error.

After this change, if the error indicated that the directory
already exists then the error is not returned to the user.

This fixes a race condition when two rclone threads are trying to
create the same directory.
2022-09-14 16:45:35 +01:00
albertony
6ce0168ba5 docs: better alignment of icons
Fixes issue with spacing between icon and text in backend docs headers.

This reverts the changes from PR #5889 and #5701, which aligned menu/dropdown items when
icons have different sizes, and implements an alternative fix which gives slightly better
results, and also is more of a native Font Awesome solution:

Font Awesome icons are designed on a grid and share a consistent height. But they vary in
width depending on how wide or narrow each symbol is. If you prefer to work with icons
that have a consistent width, adding fa-fw will render each icon using the same width.
2022-09-14 12:19:57 +01:00
albertony
67f5f04a77 build: fix lint option max-issues-per-linter 2022-09-14 12:11:54 +01:00
Øyvind Heddeland Instefjord
91f8894285 ftp: Add force_list_hidden option
Forces the use of the `LIST -a` command
when listing a directory which should
list all hidden folders and files.
2022-09-14 12:10:58 +01:00
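A usage sketch; the command line spelling --ftp-force-list-hidden is assumed here from the option name and rclone's usual backend flag naming:

    rclone lsf remote: --ftp-force-list-hidden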
partev
655d63b4fd docs: fix a typo: aftering -> after 2022-09-14 11:14:32 +01:00
Nick Craig-Wood
d3d843a11d fs: warn the user when using a remote name without a colon
A very common mistake for new users of rclone is to use a remote name
without a colon. This can be on the command line or in the config when
setting up a crypt backend.

This change checks to see if the user uses a path which matches a
remote name and gives a NOTICE like this if they do

    NOTICE: "remote" refers to a local folder, use "remote:" to refer to your remote or "./remote" to hide this warning

See: https://forum.rclone.org/t/sync-to-onedrive-personal-lands-file-in-localfilesystem-but-not-in-onedrive/32956
2022-09-13 18:06:19 +01:00
Nick Craig-Wood
57803bee22 build: update tidy-beta to new layout 2022-09-12 20:32:17 +01:00
Nick Craig-Wood
be53dcc9c9 docs: add more information about --track-renames
See: https://forum.rclone.org/t/feature-question-how-does-rclone-track-renames-and-moves/32911/4
2022-09-12 11:54:35 +01:00
Nick Craig-Wood
bd787e8f45 filter: Fix incorrect filtering with UseFilter context flag and wrapping backends
In this commit

8d1fff9a82 local: obey file filters in listing to fix errors on excluded files

We started using filters in the local backend so the user could short
circuit troublesome files/directories at a low level.

However this caused a number of integration tests to fail. This turned
out to be in backends wrapping the local backend. For example the
combine backend test failed because it changes the paths passed to the
local backend so they no longer match the paths in the current filter.

To fix this, a new feature flag `FilterAware` was added and the
UseFilter context flag is only passed to backends which support it. As
the wrapping backends don't support the flag, this fixes the problems
in the integration tests.

In future the wrapping backends could modify the active filters to
match the path modifications and then they could set the FilterAware
flag.

See #6376
2022-09-05 16:19:50 +01:00
Nick Craig-Wood
3cb7734eac config: move locking to fix fatal error: concurrent map read and map write
Before this change we assumed that github.com/Unknwon/goconfig was
threadsafe as documented.

However it turns out it is not threadsafe and looking at the code it
appears that making it threadsafe might be quite hard.

So this change increases the lock coverage in configfile to cover the
goconfig uses also.

Fixes #6378
2022-09-05 12:11:06 +01:00
Nick Craig-Wood
d08ed7d1e9 ftp: add notes on how to avoid deadlocks with concurrency - fixes #6370 2022-09-05 12:11:06 +01:00
Nick Craig-Wood
f279e4ab01 Add Josh Soref to contributors 2022-09-05 12:10:59 +01:00
albertony
35349657cd docs/sftp: document use of chunk_size option in sftp remote paired with serve sftp
Related to 0008cb4934
2022-08-31 00:04:04 +02:00
Josh Soref
ce3b65e6dc all: fix spelling across the project
* abcdefghijklmnopqrstuvwxyz
* accounting
* additional
* allowed
* almost
* already
* appropriately
* arise
* bandwidth
* behave
* bidirectional
* brackets
* cached
* characters
* cloud
* committing
* concatenating
* configured
* constructs
* current
* cutoff
* deferred
* different
* directory
* disposition
* dropbox
* either way
* error
* excess
* experiments
* explicitly
* externally
* files
* github
* gzipped
* hierarchies
* huffman
* hyphen
* implicitly
* independent
* insensitive
* integrity
* libraries
* literally
* metadata
* mimics
* missing
* modification
* multipart
* multiple
* nightmare
* nonexistent
* number
* obscure
* ourselves
* overridden
* potatoes
* preexisting
* priority
* received
* remote
* replacement
* represents
* reproducibility
* response
* satisfies
* sensitive
* separately
* separator
* specifying
* string
* successful
* synchronization
* syncing
* šenfeld
* take
* temporarily
* testcontents
* that
* the
* themselves
* throttling
* timeout
* transaction
* transferred
* unnecessary
* using
* webbrowser
* which
* with
* workspace

Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com>
2022-08-30 11:16:26 +02:00
albertony
0008cb4934 docs: document that serve sftp uses chunk size 32 KiB
See #6404
2022-08-30 10:57:25 +02:00
albertony
2ea5b4f0b8 Add YFdyh000 to contributors 2022-08-30 10:26:03 +02:00
YFdyh000
b5818454f7 onedrive: cleanup brand name 2022-08-30 10:23:29 +02:00
albertony
555def2da7 build: add package comments to silence revive linter 2022-08-28 13:43:51 +02:00
albertony
02b7613104 docs/jottacloud: improve description of the standard authentication 2022-08-28 10:31:39 +02:00
albertony
b342c6cf9c docs/ftp: improve documentation of anonymous ftp - fixes #5650 2022-08-28 10:22:29 +02:00
albertony
8a6857c295 Add Simon Bos to contributors 2022-08-28 10:19:39 +02:00
albertony
21fd13f10d Add Ryan Morey to contributors 2022-08-28 10:18:33 +02:00
albertony
5cc7797f9e Add anonion to contributors 2022-08-28 10:18:14 +02:00
albertony
8bf2d6b6c8 Add João Henrique Franco to contributors 2022-08-28 10:16:37 +02:00
João Henrique Franco
85eb9776bd crypt: fix typo in comment
strign -> string
2022-08-22 10:43:54 +02:00
anonion
47539ec0e6 docs: fix minor typo in onedrive docs 2022-08-21 22:09:33 +02:00
Ryan Morey
58b327a9f6 docs: fix typo in filter pattern example 2022-08-18 21:14:26 +02:00
Simon Bos
1107da7247 dlna: specify SSDP interface names from command line 2022-08-13 12:06:03 +01:00
Nick Craig-Wood
8d1fff9a82 local: obey file filters in listing to fix errors on excluded files
Fixes #6376
2022-08-11 12:23:06 +01:00
Nick Craig-Wood
2c5923ab1a filter: make sure we check --files-from when looking for a single file 2022-08-11 12:20:17 +01:00
Nick Craig-Wood
1ad22b8881 gcs: add --gcs-endpoint flag and config parameter
See: https://forum.rclone.org/t/how-to-modify-google-cloud-storage-endpoint-uri/32342
2022-08-09 17:33:21 +01:00
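A usage sketch; the endpoint URL is a placeholder and the same value can equally be set as endpoint in the remote's config:

    rclone ls remote:bucket --gcs-endpoint https://storage.example.com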
Nick Craig-Wood
0501773db1 azureblob,b2,s3: fix chunksize calculations producing too many parts
Before this fix, the chunksize calculator was using the previous size
of the object, not the new size of the object to calculate the chunk
sizes.

This meant that uploading a replacement object which needed a new
chunk size would fail, using too many parts.

This fixes the calculator to take the size explicitly.
2022-08-09 12:57:38 +01:00
Nick Craig-Wood
cb8842941b Add Mark Trolley to contributors 2022-08-09 12:57:38 +01:00
Mark Trolley
5439a2c5c6 docs: fix script installation command on downloads page
Script installation instructions in `downloads.md` differ from those in
`install.md` and fail on MacOS.
2022-08-09 11:58:04 +01:00
Nick Craig-Wood
d347ac0154 local: disable xattr support if the filesystems indicates it is not supported
Before this change, if rclone was run with `-M` on a filesystem
without xattr support, it would error out.

This patch makes rclone detect the not supported errors and disable
xattrs from then on. It prints one ERROR level message about this.

See: https://forum.rclone.org/t/metadata-update-local-s3/32277/7
2022-08-09 09:27:56 +01:00
Nick Craig-Wood
9f33eb2e65 Changelog updates from Version 1.59.1 2022-08-08 19:01:11 +01:00
Nick Craig-Wood
fe801b8fef Add Joram Schrijver to contributors 2022-08-08 19:01:11 +01:00
albertony
6b158f33a3 serve sftp: document legacy code for checksum detection
See #6351
2022-08-06 20:46:38 +02:00
Joram Schrijver
5a6d233924 dlna: fix SOAP action header parsing - fixes #6354
Changes in github.com/anacrolix/dms changed upnp.ServiceURN to include a
namespace identifier. This identifier was previously hardcoded, but is
now parsed out of the URN. The old SOAP action header parsing logic was
duplicated in rclone and did not handle this field. Resulting responses
included a URN with an empty namespace identifier, breaking clients.
2022-08-06 17:23:37 +01:00
Nick Craig-Wood
df513ca90a build: update dependencies 2022-08-05 17:43:53 +01:00
Nick Craig-Wood
49bb640bae accounting: fix panic in core/stats-reset with unknown group - fixes #6327
This also adds tests for the rc commands for stats groups
2022-08-05 17:30:55 +01:00
Nick Craig-Wood
98fd00a655 serve sftp: fix checksum detection - Fixes #6351
Before this change, rclone serve sftp operating with a new rclone,
after the md5sum/sha1sum detection was reworked to just run a plain
`md5sum`/`sha1sum` command in

3ea82032e7 sftp: support md5/sha1 with rsync.net #3254

failed to signal to the remote that md5sum/sha1sum wasn't supported, as
done in

71e172a139 serve/sftp: support empty "md5sum" and "sha1sum" commands

Instead we unconditionally returned good hashes even if the remote being
served didn't support the hash type in question.

This fix checks the hash type is supported and returns an error

    MD5 hash not supported

When the backend is first contacted this will cause the sftp backend
to detect that the hash type isn't available.

Unfortunately this may have cached the wrong state so editing or
remaking the config may be necessary to fix it.
2022-08-05 17:16:23 +01:00
Nick Craig-Wood
16039b350d fs: fix parsing of times and durations of the form "YYYY-MM-DD HH:MM:SS"
Before this fix, the parsing code gave an error like this

    parsing "2022-08-02 07:00:00" as fs.Time failed: expected newline

This was due to the Scan call failing to read all the data.

This patch fixes that, and redoes the tests
2022-08-05 16:45:23 +01:00
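For illustration (remote name is a placeholder), flags of type fs.Time, such as --s3-version-at added elsewhere in this release, now accept this form:

    rclone ls remote:bucket --s3-version-at "2022-08-02 07:00:00"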
Nick Craig-Wood
ebe86c6cec s3: add --s3-decompress flag to download gzip-encoded files
Before this change, if an object compressed with "Content-Encoding:
gzip" was downloaded, a length and hash mismatch would occur since the
go runtime automatically decompressed the object on download.

If --s3-decompress is set, this change erases the length and hash on
compressed objects so they can be downloaded successfully, at the cost
of not being able to check the length or the hash of the downloaded
object.

If --s3-decompress is not set the compressed files will be downloaded
as-is providing compressed objects with intact size and hash
information.

See #2658
2022-08-05 16:45:23 +01:00
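A usage sketch (remote name is a placeholder); note the trade-off above that the size and hash of decompressed downloads cannot be checked:

    rclone copy remote:bucket/logs /tmp/logs --s3-decompress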
Nick Craig-Wood
1f5e7ce598 lib/readers: add GzipReader 2022-08-05 16:45:23 +01:00
Nick Craig-Wood
4b981100db s3: refactor to use generated code instead of reflection to copy structs 2022-08-05 16:45:23 +01:00
Nick Craig-Wood
4344a3e2ea s3: implement --s3-version-at flag - Fixes #1776 2022-08-05 16:45:23 +01:00
Nick Craig-Wood
1542a979f9 s3: refactor f.list() to take an options struct as it had too many parameters 2022-08-05 16:45:23 +01:00
Nick Craig-Wood
81d242473a s3: implement Purge to purge versions and backend cleanup-hidden 2022-08-05 16:45:23 +01:00
Nick Craig-Wood
0ae171416f s3: implement --s3-versions flag - See #1776 2022-08-05 16:45:23 +01:00
Nick Craig-Wood
a59fa2977d s3: factor different listing versions into separate objects 2022-08-05 16:42:30 +01:00
Nick Craig-Wood
7243918069 s3: implement backend versioning command to get/set bucket versioning 2022-08-05 16:42:30 +01:00
Nick Craig-Wood
fa49971d49 docs: move time/duration option docs to the main docs 2022-08-05 16:42:30 +01:00
Nick Craig-Wood
77e3512714 fstests: Make InternalTestFiles so the internal tests know the current state 2022-08-05 16:42:30 +01:00
Nick Craig-Wood
337b43e7e4 fstests: make ReadObject publically accessible 2022-08-05 16:42:30 +01:00
Nick Craig-Wood
6fd9e3d717 build: reformat comments to pass go1.19 vet
See: https://go.dev/doc/go1.19#go-doc
2022-08-05 16:35:41 +01:00
Nick Craig-Wood
876f791ecd Revert "build: lint with go1.18 until golangci-lint is updated"
This reverts commit 2170376d1b.
2022-08-05 16:04:54 +01:00
Nick Craig-Wood
918bd6d3c3 dropbox: fix ChangeNotify was unable to decrypt errors
Before this fix, the dropbox backend wasn't decoding the file names
received in changenotify events into rclone standard format.

This meant that changenotify events for filenames which had encoded
characters were failing to be decrypted properly if wrapped in crypt.

See: https://forum.rclone.org/t/rclone-vfs-cache-says-file-name-too-long/31535
2022-08-04 10:26:25 +01:00
Nick Craig-Wood
f49be033c6 mega: Fix nil pointer exception when bad node received
Fixes: #6336
2022-08-04 10:22:57 +01:00
Nick Craig-Wood
2a817e21cb vfs: fix excess CPU used by VFS cache cleaner looping
Before this change the VFS cache cleaner would loop indefinitely while
the cache was above quota. This used up all the CPU.

This fix prevents the cache cleaner from looping. It will be kicked on
ENOSPACE and run in its scheduled time otherwise so this should be
sufficient.

See: https://forum.rclone.org/t/vfs-keeps-checking-same-files/32120
2022-08-04 10:19:47 +01:00
Nick Craig-Wood
a07d376fb1 vfs: reduce memory usage by re-ordering commonly used structures 2022-08-04 10:19:47 +01:00
Nick Craig-Wood
e749bc58f4 vfs: reduce memory use by embedding sync.Cond 2022-08-04 10:19:47 +01:00
Nick Craig-Wood
821e084f28 combine: fix errors with backends shutting down while in use
Before this patch backends could be shut down when they fell out of the
cache while they were still in use with combine. This was particularly
noticeable with the dropbox backend which gave this error when
uploading files after the backend was Shutdown.

    Failed to copy: upload failed: batcher is shutting down

This patch gets the combine remote to pin them until it is finished.

See: https://forum.rclone.org/t/rclone-combine-upload-failed-batcher-is-shutting-down/32168
2022-08-04 10:13:41 +01:00
Nick Craig-Wood
2170376d1b build: lint with go1.18 until golangci-lint is updated
See: https://github.com/golangci/golangci-lint/pull/3037
2022-08-03 18:04:58 +01:00
Nick Craig-Wood
8125b1cf08 build: update to v3 of golangci-lint action 2022-08-03 18:04:58 +01:00
Nick Craig-Wood
ba60984f33 build: update to go1.19 and make go1.17 the minimum required version 2022-08-03 18:04:58 +01:00
Nick Craig-Wood
a875320e37 sync,operations: optimise --copy-dest and --compare-dest
Before this change --compare-dest and --copy-dest would check to see
if the compare/copy object existed first, before seeing if the
destination object was present.

This is inefficient, because in most --copy-dest syncs the destination
will be present and the compare/copy object need never be tested.
--compare-dest syncs may also be speeded up if they are done to the
same directory repeatedly.

This fixes the problem by re-arranging the logic so if the transfer is
not needed then the compare/copy object is never tested.

See: https://forum.rclone.org/t/union-with-copy-dest-enabled-is-slower-than-expected/32172
2022-08-03 17:44:47 +01:00
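For context, a placeholder invocation that benefits from this change:

    rclone sync remote:src remote:dst --copy-dest remote:previous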
Nick Craig-Wood
639624184d build: fix android build after GitHub actions change
Before this change the android build started failing with

    gomobile: ANDROID_NDK_HOME specifies /usr/local/lib/android/sdk/ndk/25.0.8775105
    which is unusable: unsupported API version 16 (not in 19..33)

This was caused by a change to github actions, but is ultimately due
to an issue in gomobile with the newest version of the SDK.

This change fixes the problem by declaring a minimum API version of 21
and using version 21 compilers to build everything and using the
default NDK in github actions.

See: https://github.com/actions/virtual-environments/issues/5930
See: https://github.com/lightningnetwork/lnd/issues/6651
2022-08-03 17:22:35 +01:00
Nick Craig-Wood
fe84cca1ad Revert "build: disable revive linter pending a fix in golangci-lint"
This reverts commit 7a24c173f6.
2022-08-03 13:14:51 +01:00
Nick Craig-Wood
9d3958bd0b build: fix formatting after golangci-lint update 2022-08-03 10:11:16 +01:00
Nick Craig-Wood
3a8e52de74 dropbox: fix infinite loop on uploading a corrupted file
Before this change, if rclone attempted to upload a file which read
more bytes than the size it declared then the uploader would enter an
infinite loop.

See: https://forum.rclone.org/t/transfer-percentages-100-again/32109
2022-07-29 17:40:05 +01:00
albertony
72227a0151 jottacloud: do not store username in config when using standard auth
Previously, with standard auth, the username would be stored in config - but only after
entering the non-standard device/mountpoint sequence during config (a feature introduced
with #5926). Regardless of that, rclone always requests the username from the api at
startup (NewFS).

In #6270 (commit 9dbed02329) this was changed to always
store username in config (consistency), and then also use it to avoid the repeated
customer info request in NewFs (performance). But, as reported in #6309, it did not work
with legacy auth, where user enters username manually, if user entered an email address
instead of the internal username required for api requests. This change was therefore
recently reverted.

The current commit takes another step back to not store the username in config during
the non-standard device/mountpoint config sequence (consistency). The username will
now only be stored in config when using legacy auth, where it is an input parameter.
2022-07-25 18:23:09 +01:00
Nick Craig-Wood
9f40cb114a Revert "jottacloud: always store username in config and use it to avoid initial api request"
This reverts commit 9dbed02329.

See: #6309
2022-07-25 18:23:09 +01:00
Lesmiscore
2f461f13e3 internetarchive: handle hash symbol in the middle of filename 2022-07-22 13:08:42 +01:00
Nick Craig-Wood
7a24c173f6 build: disable revive linter pending a fix in golangci-lint
The revive linter got extremely slow in golangci-lint 1.47.1 causing
the CI to time out.

Disable for the time being until it is fixed.

See: https://github.com/golangci/golangci-lint/issues/2997
2022-07-20 23:07:20 +01:00
Nick Craig-Wood
fb60aeddae Add Jordi Gonzalez Muñoz to contributors 2022-07-20 23:07:02 +01:00
Nick Craig-Wood
695736d1e4 Add Steve Kowalik to contributors 2022-07-20 23:07:02 +01:00
albertony
f0396070eb sftp: fix issue with WS_FTP by working around failing RealPath 2022-07-20 18:07:50 +01:00
Jordi Gonzalez Muñoz
f1166757ba librclone: add PHP bindings and test program 2022-07-20 17:20:12 +01:00
Steve Kowalik
9b76434ad5 drive: make --drive-stop-on-upload-limit obey quota exceeded error
Extend the shouldRetry function by also checking for the quotaExceeded
reason, and since this function appeared to be untested, add a test case
for the existing errors and this new one.

Fixes #615
2022-07-20 10:37:34 +01:00
Nick Craig-Wood
440d0cd179 s3: fix --s3-no-head panic: reflect: Elem of invalid type s3.PutObjectInput
In

22abd785eb s3: implement reading and writing of metadata #111

The reading information of objects was refactored to use the
s3.HeadObjectOutput structure.

Unfortunately the code branch with `--s3-no-head` was not tested
otherwise this panic would have been discovered.

This shows that this path is not integration tested, so this adds a
new integration test.

Fixes #6322
2022-07-18 23:38:50 +01:00
Nick Craig-Wood
a047d30eca Add Yen Hu to contributors 2022-07-18 23:38:50 +01:00
Yen Hu
03d0f331f7 onedrive: rename Onedrive(cn) 21Vianet to Vnet Group
The old site had shown a redirect page to the new one since 2021-4-21.
https://www.21vianet.com
The official site has also been renamed to Vnet Group.
https://www.vnet.com/en/about
2022-07-17 17:07:23 +01:00
Lesmiscore
049674aeab backend/internetarchive: ignore checksums for files using the different method 2022-07-17 14:02:40 +01:00
Nick Craig-Wood
50f053cada dropbox: fix hang on quit with --dropbox-batch-mode off
This problem was created by the fact that we are much more diligent
about calling Shutdown now, and the dropbox backend had a hang if the
batch mode was "off" in the Shutdown method.

See: https://forum.rclone.org/t/dropbox-lsjson-in-1-59-stuck-on-commiting-upload/31853
2022-07-17 12:51:44 +01:00
Nick Craig-Wood
140af43c26 build: add 32 bit test runner to avoid problems like #6311 2022-07-14 20:13:03 +01:00
Nick Craig-Wood
f467188876 Add Evan Spensley to contributors 2022-07-14 20:13:03 +01:00
Evan Spensley
4a4379b312 jobs: add ability to stop group
Adds new rc call to stop all running jobs in a group. Fixes #5561
2022-07-13 18:13:31 +01:00
Nick Naumann
8c02fe7b89 sync: update docs and error messages to reflect fixes to overlap checks 2022-07-13 16:04:53 +01:00
Nick Naumann
11be920e90 sync: add filter-sensitivity to --backup-dir option
The old Overlapping function and corresponding tests have been removed, as it has been completely replaced by the OverlappingFilterCheck function.
2022-07-13 16:04:53 +01:00
albertony
8c19b355a5 docs: fix links to mount command from install docs 2022-07-13 12:33:54 +02:00
r-ricci
67fd60275a union: fix panic due to misalignment of struct field in 32 bit architectures
`FS.cacheExpiry` is accessed through sync/atomic.
According to the documentation, "On ARM, 386, and 32-bit MIPS, it is
the caller's responsibility to arrange for 64-bit alignment of 64-bit
words accessed atomically. The first word in a variable or in an
allocated struct, array, or slice can be relied upon to be 64-bit
aligned."
Before commit 1d2fe0d856 this field was
aligned, but then a new field was added to the structure, causing the
test suite to panic on linux/386.
No other field is used with sync/atomic, so `cacheExpiry` can just be
placed at the beginning of the struct to ensure it is always aligned.
2022-07-11 18:34:06 +01:00
Nick Craig-Wood
b310490fa5 union: fix multiple files being uploaded when roots don't exist
See: https://forum.rclone.org/t/union-backend-copying-to-all-remotes-while-it-shouldnt/31781
2022-07-11 18:19:36 +01:00
Nick Craig-Wood
0ee0812a2b union: fix duplicated files when using directories with leading /
See: https://forum.rclone.org/t/union-backend-copying-to-all-remotes-while-it-shouldnt/31781
2022-07-11 18:19:36 +01:00
Nick Craig-Wood
55bbff6346 operations: add --server-side-across-configs global flag for any backend 2022-07-11 18:17:42 +01:00
Nick Craig-Wood
9c6cfc1ff0 combine: throw error if duplicate directory name is specified
See: https://forum.rclone.org/t/v1-59-combine-qs/31814
2022-07-10 15:40:30 +01:00
Nick Craig-Wood
f753d7cd42 combine: fix docs showing remote= instead of upstreams=
See: https://forum.rclone.org/t/v1-59-combine-qs/31814
2022-07-10 15:34:48 +01:00
Nick Craig-Wood
f5be1d6b65 Start v1.60.0-DEV development 2022-07-09 20:43:17 +01:00
Nick Craig-Wood
00a684d877 Version v1.59.0 2022-07-09 18:09:25 +01:00
Nick Craig-Wood
1c4ee2feee gcs: add --gcs-decompress flag to download gzip-encoded files
By default these will be downloaded compressed.

This changes the default of the previous commit

2781f8e2f1 gcs: Fix download of "Content-Encoding: gzip" compressed objects

But will fit in better with the metadata framework when copying
gzip-encoded objects from backend to backend.
2022-07-09 17:31:12 +01:00
Nick Craig-Wood
876f12f2c4 Add Ovidiu Victor Tatar to contributors 2022-07-09 12:35:59 +01:00
Nick Craig-Wood
6e9c1eebd9 Add Claudio Maradonna to contributors 2022-07-09 12:35:59 +01:00
Lesmiscore (Naoya Ozaki)
42dfadfa1b internetarchive: add support for Metadata 2022-07-08 23:47:50 +01:00
Ovidiu Victor Tatar
b4d847cadd new backend: hidrive - fixes #1069 2022-07-08 18:24:54 +01:00
Ovidiu Victor Tatar
502226bfc8 pacer: add ZeroDelayCalculator 2022-07-08 18:24:54 +01:00
Ovidiu Victor Tatar
53400d7edc oauthlib: add method to set a token as expired
This can be used by backends to trigger a refresh of an access token if
they detect an invalid token.
2022-07-08 18:24:54 +01:00
Claudio Maradonna
62bcc84f6f vfs: add --vfs-disk-space-total-size option to manually set the total disk space
Now you can specify --vfs-disk-space-total-size to set the total disk
space (defaults to -1)

fixes #3270
2022-07-08 17:26:54 +01:00
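A usage sketch (mountpoint and size value are placeholders):

    rclone mount remote: /mnt/rclone --vfs-disk-space-total-size 100G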
Nick Craig-Wood
2e54b56a01 rcat: check checksums by default like copy does #6305
Before this change we were calculating the checksum for an rcat
transfer but never checking it.

See: https://forum.rclone.org/t/optimize-rclone-on-raspberry-pi-4-8gb/31741
2022-07-07 16:09:09 +01:00
Nick Craig-Wood
2515039e18 Add Lorenzo Maiorfi to contributors 2022-07-07 16:09:09 +01:00
Nick Craig-Wood
a9c531b9eb Add Paul Norman to contributors 2022-07-07 16:09:09 +01:00
Nick Craig-Wood
0db50ecb2f Add zzr93 to contributors 2022-07-07 16:09:09 +01:00
Nick Craig-Wood
388da82762 Add Anthrazz to contributors 2022-07-07 16:09:09 +01:00
Lorenzo Maiorfi
b5efffee9d azureblob: allow remote emulator (azurite) - fixes #6290 2022-07-06 11:54:04 +01:00
Paul Norman
3ec07d5db9 docs: fix typo in license webpage 2022-07-06 11:25:31 +01:00
albertony
5c6a958ad8 go mod tidy: github.com/pkg/xattr should be direct 2022-07-04 11:24:59 +02:00
albertony
ad8c94e982 staticcheck: redundant return statement 2022-07-04 11:24:59 +02:00
albertony
e5bf6a813c staticcheck: google api New is deprecated: please use NewService instead 2022-07-04 11:24:59 +02:00
albertony
f18095b004 staticcheck: ignore deprecations that are not relevant 2022-07-04 11:24:59 +02:00
albertony
c70e890966 staticcheck: TLS config NameToCertificate is deprecated, should instead let library select the first compatible chain from Certificates 2022-07-04 11:24:59 +02:00
albertony
986bb17656 staticcheck: awserr.BatchError is deprecated: Replaced with BatchedErrors 2022-07-04 11:24:59 +02:00
albertony
92a43c5f7b staticcheck: should use a simple channel send/receive instead of select with a single case 2022-07-04 11:24:59 +02:00
albertony
9612ca6110 staticcheck: ignore unused if platform dependent 2022-07-04 11:24:59 +02:00
albertony
1f9560e873 selfupdate: replace deprecated x/crypto/openpgp package with ProtonMail/go-crypto 2022-07-04 11:24:59 +02:00
albertony
c9d67c86fb staticcheck: ignore suggestion to use context.TODO instead of nil when testing nil Context 2022-07-04 11:24:59 +02:00
albertony
3e9c5eca3b yandex: handle api error on server-side move 2022-07-04 11:24:59 +02:00
albertony
a1fd60ec2b staticcheck: empty branch 2022-07-04 11:24:59 +02:00
albertony
7b8c974dec staticcheck: ineffective break statement 2022-07-04 11:24:59 +02:00
albertony
5b579cea47 staticcheck: use golang.org/x/text/cases instead of deprecated strings.Title
strings.Title has been deprecated since Go 1.18 and an alternative has been
available since Go 1.0. The rule Title uses for word boundaries does not handle
Unicode punctuation properly. Use golang.org/x/text/cases instead.
2022-07-04 11:24:59 +02:00
albertony
7822df565e staticcheck: unused func 2022-07-04 11:24:59 +02:00
albertony
3435bf7f34 staticcheck: no value of type int64 is greater than math.MaxInt64 2022-07-04 11:24:59 +02:00
albertony
0772cae314 staticcheck: use result of type assertion to simplify cases 2022-07-04 11:24:59 +02:00
zzr93
060c8dfff0 operations: use correct src/dst in some log messages
Most of the time this will make no difference to user logs, however
the difference may be visible in JSON logs and on the rare occasions
src and dst are pointing to different file names.
2022-07-04 10:18:04 +01:00
Anthrazz
424a1f39eb sftp: add Hetzner Storage Boxes to supported sftp backends 2022-07-04 10:08:33 +01:00
Nick Craig-Wood
06182a3443 s3: actually compress the payload for content-type gzip test 2022-07-04 09:42:49 +01:00
Nick Craig-Wood
a58b482061 fstests: fix Metadata tests on remotes with additional config 2022-07-04 09:42:49 +01:00
Nick Craig-Wood
accf91742c fs: add Type and FindFromFs to manage Fs and RegInfo 2022-07-04 09:42:49 +01:00
albertony
9dbed02329 jottacloud: always store username in config and use it to avoid initial api request
The existing version did save the username in config, but only when entering the custom
device/mountpoint sequence in config. Regardless of that, it did always look up the
username at startup with an api request.

This commit improves it so that the username will always be stored in config,
and when using standard authentication it picks it from the login token instead of
requesting it from the remote api, and also in fs constructor it picks it from config
instead of requesting it from remote api (again).
2022-07-03 12:56:25 +02:00
albertony
73e3bb09d7 http: fix missing response when using custom auth handler 2022-07-02 15:29:50 +02:00
Nick Craig-Wood
7e7a8a95e9 hasher: support metadata 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
ed87ae51c0 union: support metadata 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
bf4a16ae30 crypt: support metadata 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
c198700812 compress: support metadata 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
8c483daf85 combine: support metadata 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
ba5760ff38 chunker: mark as not supporting metadata 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
cd1735bb10 cache: mark as not supporting metadata 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
866c873daa backend: allow wrapping backend tests to run in make quicktest 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
c556e98f49 local: add Metadata support #111 2022-06-29 14:29:36 +01:00
Nick Craig-Wood
22abd785eb s3: implement reading and writing of metadata #111 2022-06-29 14:29:36 +01:00
Nick Craig-Wood
a692bd2cd4 s3: change metadata storage to normal map with lowercase keys 2022-06-29 14:29:36 +01:00
Nick Craig-Wood
776a083892 lsf: add metadata support with M flag 2022-06-29 14:29:36 +01:00
Nick Craig-Wood
d823a38ce5 lsjson: add --metadata/-M flag
Note that this removes the `-M` flag from `--encrypted` as it
conflicted with the global flag and adds it to `--metadata`.
2022-06-29 14:29:36 +01:00
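A usage sketch (remote path is a placeholder); note that -M is now short for --metadata, and --encrypted no longer has a short form:

    rclone lsjson -M remote:path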
Nick Craig-Wood
78d52882ca fs: add --metadata/-M flag to control whether metadata is copied 2022-06-29 14:29:36 +01:00
Nick Craig-Wood
c4451bc43a fs: add --metadata-set flag to specify metadata for uploads 2022-06-29 14:29:36 +01:00
Nick Craig-Wood
0652ec95db fs: implement MetadataInfo to show info about metadata in help and rc
Info about this will appear in operations/fsinfo and in the backend
help (`rclone help backend s3`).
2022-06-29 14:29:36 +01:00
Nick Craig-Wood
6a0e021dac fs: implement optional Metadata interface for Objects #111
This implements integration tests for the feature also.
2022-06-29 11:21:29 +01:00
Nick Craig-Wood
461d041c4d fstest: remove spurious contents return from PutTestContents and friends 2022-06-29 11:18:02 +01:00
Nick Craig-Wood
35f24d5b84 Add vyloy to contributors 2022-06-29 10:55:03 +01:00
Nick Craig-Wood
370c8fa220 Add mirekphd to contributors 2022-06-29 10:55:03 +01:00
Nick Craig-Wood
0fca4d2c86 Add buda to contributors 2022-06-29 10:55:03 +01:00
Martin Czygan
5de9278650 fs/cache: make sure we call the Shutdown method on backends
This change ensures we call the Shutdown method on backends when
they drop out of the fs/cache and at program exit.

Some backends implement the optional fs.Shutdowner interface. Until now,
Shutdown is only checked and called, when a backend is wrapped (e.g.
crypt, compress, ...).

To have a general way to perform operations at the end of the backend
lifecycle with proper error handling, we can call Shutdown at cache
clear time.

We add a finalize hook to the cache which will be called when values
drop out of the cache.

Previous discussion: https://forum.rclone.org/t/31336
2022-06-28 12:51:59 +01:00
vyloy
326c43ab3f s3: add IDrive e2 to provider list 2022-06-28 09:12:36 +01:00
mirekphd
32006033e6 docs: note wider impact of --checkers=N on parallelism #6280 2022-06-27 19:33:07 +01:00
buda
517e7d9271 accounting: fix unknown length file transfers count 3 transfers each #6213
This was caused by nested calls to NewTransfer/Done.

This fixes the problem by only incrementing transfers if the remote is
present in the transferMap which means we only increment it once.
2022-06-27 17:56:03 +01:00
albertony
fdd2f8e6d2 Error strings should not be capitalized
Reported by staticcheck 2022.1.2 (v0.3.2)

See: staticcheck.io
2022-06-23 23:26:02 +02:00
Abhiraj
027746ef6e drive: moved rclone_folder_id to advanced section - fixes #3463 2022-06-23 22:08:09 +02:00
albertony
53f831f40a docs: add missing code section formatting to commands and flags 2022-06-21 23:43:00 +02:00
albertony
9f81b4df4f docs/lsjson: fix code block indentation 2022-06-21 23:43:00 +02:00
albertony
bf54c909c9 docs: improved capitalization 2022-06-21 23:43:00 +02:00
albertony
dbf1234edf docs: skip "Connection" suffix from FTP, SSH/SFTP and HTTP backend names 2022-06-21 23:43:00 +02:00
albertony
70d9d75801 docs: improve serve command descriptions 2022-06-21 23:43:00 +02:00
albertony
bc70a95fca docs: consistent capitalization of WebDAV, DLNA, HTTP 2022-06-21 23:43:00 +02:00
albertony
ee87e919c5 docs/ncdu: note that refresh screen shortcut will fix screen corruption 2022-06-21 23:43:00 +02:00
albertony
4f0eae366f docs/ncdu: fix inconsistency in key map help text
Ctrl+L was listed as ^L, while Ctrl+c was listed as c-C. Changed the latter to ^c.
2022-06-21 23:43:00 +02:00
albertony
de5ccaab8e docs: cross link doc pages for related commands 2022-06-21 23:43:00 +02:00
albertony
4b7dc35cf4 Fix sync docs incorrect merge
This copies the changes from an autogenerated section in the following commit:
f2a15a174f
2022-06-21 23:43:00 +02:00
Nick Craig-Wood
bc705e14d8 vfscache: fix fatal error: sync: unlock of unlocked mutex error
This message is a double panic and was actually caused by an assertion
panic in:

vfs/vfscache/downloaders/downloaders.go

This is triggered by the code added relatively recently to fix a bug
with renaming files:

ec72432cec vfs: fix failed to _ensure cache internal error: downloaders is nil error

So it appears that item.o may be nil at this point.

This patch detects item.o being nil and fetches it again with NewObject.

Fixes #6190 Fixes #6235
2022-06-21 14:28:53 +01:00
Nick Craig-Wood
ea5bb79366 drive: document export for google apps scripts better
This also adds some more mime types from the code.

See: https://forum.rclone.org/t/how-do-i-copy-google-apps-scripts-with-rclone/31373
2022-06-21 14:28:53 +01:00
Nick Craig-Wood
e95dff2fa1 drive: add backend commands exportformats and importformats for debugging 2022-06-21 14:28:53 +01:00
Nick Craig-Wood
99dfe1eeae Add Martin Czygan to contributors 2022-06-21 14:28:53 +01:00
Nick Craig-Wood
ed92bf335d Add J-P Treen to contributors 2022-06-21 14:28:53 +01:00
Nick Craig-Wood
3d55b537c6 Add Caleb to contributors 2022-06-21 14:28:53 +01:00
Nick Craig-Wood
d03fffdf8d Add eNV25 to contributors 2022-06-21 14:28:53 +01:00
Martin Czygan
7a909ebfb0 fs/cache: fix cache unpin 2022-06-20 12:14:58 +01:00
buengese
ac0dc9922e copyurl: add tests for the option to honor the HTTP header filename directive 2022-06-20 12:06:09 +01:00
J-P Treen
8b8802a078 copyurl: Add option to honor the HTTP header filename directive.
Implemented --header-filename for use with copyurl.

For specifically setting preferred download filenames for HTTP requests, RFC 6266
specifies a 'filename' directive, available within the 'Content-Disposition'
header. We can handle this with 'mime.ParseMediaType'.

See below for details:
https://httpwg.org/specs/rfc6266.html#disposition.parameter.filename
https://httpwg.org/specs/rfc6266.html#advice.generating

Co-authored-by: buengese <buengese@protonmail.com>
2022-06-20 12:06:09 +01:00
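For reference, the filename directive can be pulled out of a Content-Disposition header with the standard library like this - an illustrative sketch, not the exact copyurl code:

    package main

    import (
        "fmt"
        "mime"
    )

    func main() {
        // A typical Content-Disposition header value from an HTTP response.
        header := `attachment; filename="report-2022.pdf"`

        // ParseMediaType returns the media type and a map of its parameters,
        // including the filename directive if present.
        mediaType, params, err := mime.ParseMediaType(header)
        if err != nil {
            panic(err)
        }
        fmt.Println(mediaType)          // attachment
        fmt.Println(params["filename"]) // report-2022.pdf
    }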
Caleb
f2a15a174f docs: grammatical clarification in sync docs 2022-06-19 15:26:53 +02:00
buengese
21c746a56c fichier: parse api error codes and handle them accordingly 2022-06-19 15:07:33 +02:00
eNV25
36add0afbf ncdu: replace termbox with tcell's termbox wrapper
The https://github.com/nsf/termbox-go library is no longer maintained
so this change replaces it with the maintained
github.com/gdamore/tcell library which has a termbox backwards
compatibility layer.

There are a few minor changes from the termbox library:

- Using Clear with fg bg ColorDefault resulted in a white background for some reason.
    - Clear with fg ColorWhite bg ColorBlack was used instead.
- tcell's termbox wrapper doesn't support ColorLightYellow.
    - ColorYellow + 8 was used instead.
2022-06-19 11:22:45 +01:00
Nick Craig-Wood
14e0396fcb test_all: allow internet archive backend more time 2022-06-18 15:13:18 +01:00
Nick Craig-Wood
100acc570a test_all: fix -clean so it works on remotes with paths 2022-06-18 15:10:09 +01:00
Nick Craig-Wood
b9de37af80 test_all: Only run backend tests for Internet Archive as it is too slow 2022-06-17 16:52:30 +01:00
Nick Craig-Wood
f7c36ce0f9 s3: unwrap SDK errors to reveal underlying errors on upload
The SDK doesn't wrap errors in a Go standard way so they can't be
unwrapped and tested for - eg fatal error.

The code looks for a Serialization or RequestError and returns the
unwrapped underlying error if possible.

This fixes the fs/operations integration tests checking for fatal
errors being returned.
2022-06-17 16:52:30 +01:00
Nick Craig-Wood
f829ded456 Add Phil Shackleton to contributors 2022-06-17 16:52:30 +01:00
Nick Craig-Wood
2fac8fdde6 Add Scott Grimes to contributors 2022-06-17 16:52:30 +01:00
Phil Shackleton
8e2d9a4cb9 drive: update Internal OAuth consent screen docs
Updated instructions in `Making your own client_id` section to record process for selecting "Internal" OAuth consent screen.
2022-06-17 16:50:01 +01:00
Scott Grimes
295006f662 opendrive: resolve lag and truncate bugs - fixes #5936
Co-authored-by: buengese <buengese@protonmail.com>
2022-06-17 16:48:03 +01:00
Lu Wang
dcc128c70d docs/onedrive: document creation of client ID for OneDrive Business 2022-06-17 07:33:56 +02:00
Nick Craig-Wood
c85fbebce6 s3: simplify PutObject code to use the Request.SetStreamingBody method
In this commit

e5974ac4b0 s3: use PutObject from the aws SDK to upload single part objects

rclone was made to upload objects to s3 using PUT requests rather than
using signed uploads.

However this change missed the fact that there is a supported way to
do this in the SDK using the SetStreamingBody method on the Request.

This therefore reverts a lot of the previous commit to do with making
an unsigned connection and other complication and uses the SDK
facility.
2022-06-16 23:26:19 +01:00
Nick Craig-Wood
e59801c69b Add Maciej Radzikowski to contributors 2022-06-16 23:26:19 +01:00
Nick Craig-Wood
5697dbc80f local: fix parsing of --local-nounc flag 2022-06-16 22:13:50 +01:00
Nick Craig-Wood
7d3648dc46 serve ftp: check --passive-port arguments are correct
See: https://forum.rclone.org/t/serve-ftp-passive-port-validity-check/27458
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
a6ca4b3817 test info: check file name lengths using 1,2,3,4 byte unicode characters 2022-06-16 22:13:50 +01:00
Nick Craig-Wood
e57fe14b61 mount: log IO errors at ERROR level - fixes #6217 2022-06-16 22:13:50 +01:00
Nick Craig-Wood
115f1c2cc9 operations: speed up hash checking by aborting the other hash if first returns nothing
This speeds up hash checks when a Hash() function returns "" - this
means that the hash calculation can be canceled on the other side.

In the common case of a local hash vs an empty remote hash this saves a lot
of time.

See: https://forum.rclone.org/t/rclone-s3-backend-copy-is-2x-slower-than-aws-s3-cp/27321/9
2022-06-16 22:13:50 +01:00
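The general pattern is to compute both hashes concurrently and cancel the other calculation as soon as one side reports that it has no hash. A minimal, illustrative Go sketch of that idea (not the actual operations code):

    package main

    import (
        "context"
        "fmt"
    )

    // hashFunc stands in for a cancelable backend Hash() call.
    type hashFunc func(ctx context.Context) string

    // checkHashes computes both hashes concurrently and cancels the other side
    // as soon as one of them returns "" (hash not available).
    func checkHashes(ctx context.Context, src, dst hashFunc) (srcHash, dstHash string) {
        ctx, cancel := context.WithCancel(ctx)
        defer cancel()

        srcCh := make(chan string, 1)
        dstCh := make(chan string, 1)
        go func() { srcCh <- src(ctx) }()
        go func() { dstCh <- dst(ctx) }()

        for i := 0; i < 2; i++ {
            select {
            case srcHash = <-srcCh:
                if srcHash == "" {
                    cancel() // no point finishing the other hash
                }
            case dstHash = <-dstCh:
                if dstHash == "" {
                    cancel()
                }
            }
        }
        return srcHash, dstHash
    }

    func main() {
        local := func(ctx context.Context) string { return "d41d8cd98f00b204e9800998ecf8427e" }
        remote := func(ctx context.Context) string { return "" } // e.g. an object with no hash available
        fmt.Println(checkHashes(context.Background(), local, remote))
    }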
Nick Craig-Wood
5e4caa69ce local: make Hash function cancelable via context 2022-06-16 22:13:50 +01:00
Nick Craig-Wood
e7483b40b3 fshttp: add --disable-http-keep-alives to disable HTTP Keep Alives
See: https://forum.rclone.org/t/getting-rate-limited-before-advertised-limit-on-s3-compatible-object-storage/31010/
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
fa48b880c2 s3: retry RequestTimeout errors
See: https://forum.rclone.org/t/s3-failed-upload-large-files-bad-request-400/27695
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
4ac875a811 sync: fix --max-duration and --cutoff-mode soft
Before this change using --max-duration and --cutoff-mode soft would
work like --cutoff-mode hard.

This bug was introduced in this commit which made transfers be
cancelable - before that transfers couldn't be canceled.

122a47fba6 accounting: Allow transfers to be canceled with context #3257

This change adds the timeout to the input context for reading files
rather than the transfer context so the files transfers themselves
aren't canceled if --cutoff-mode soft is in action.

This also adds a test for cutoff mode soft and max duration which was
missing.

See: https://forum.rclone.org/t/max-duration-and-retries-not-working-correctly/27738
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
3f61869179 cmount: add tracing for *xattr FUSE callbacks
See: https://github.com/winfsp/cgofuse/issues/66
See: https://forum.rclone.org/t/cannot-copy-files-to-mounted-azure-storage-windows/30092
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
60d87185e1 sftp: add --sftp-set-env option to set environment variables
Fixes #6094
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
78120d40d9 sftp: add --sftp-concurrency to improve high latency transfers
See: https://forum.rclone.org/t/increasing-sftp-transfer-speed/29928
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
95e0934755 sftp: add --sftp-chunk-size to control packets sizes for high latency links
See: https://forum.rclone.org/t/increasing-sftp-transfer-speed/29928
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
1651429041 union: add min_free_space option for lfs/eplfs policies - fixes #6071 2022-06-16 22:13:50 +01:00
Nick Craig-Wood
29e37749b3 union: fix eplus policy to select correct entry for existing files #6071 2022-06-16 22:13:50 +01:00
Nick Craig-Wood
1e1af46a12 union: fix get free space for remotes which don't support it #6071
Before this fix GetFreeSpace returned math.MaxInt64 for remotes which
don't support reading free space. However this value is treated as "too
large" in various comparison routines, meaning that remotes reporting
math.MaxInt64 were never being selected.

This fixes GetFreeSpace to return math.MaxInt64 - 1 so these remotes can be selected.

It also fixes GetUsedSpace the same way, however as the default for not
supported was 0 this was very unlikely to have ever caused a problem.
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
1d2fe0d856 union: enable passing of options to upstreams and policies #6071
This factors out the options into a sub package so they can be passed
to upstreams and used in policies.
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
4f94b27800 check: implement --no-traverse and --no-unicode-normalization
See: https://forum.rclone.org/t/rclone-check-head-or-list-object-from-source/30400
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
4d72abf389 dropbox: fix nil pointer exception on dropbox impersonate user not found
Fixes #6139
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
411013dbdc drive: add --drive-resource-key for accessing link-shared files 2022-06-16 22:13:50 +01:00
Nick Craig-Wood
e87e331f4c drive: make --drive-shared-with-me work with shared drives
Fixes #6247
2022-06-16 22:13:50 +01:00
Maciej Radzikowski
2e91287b2e docs/s3: add note about chunk size decreasing progress accuracy 2022-06-16 22:29:36 +02:00
Nick Craig-Wood
a0cb3bbd02 mount: allow tests to run on CI 2022-06-16 16:48:09 +01:00
Nick Craig-Wood
4a382c09ec mount: run tests in a subprocess to fix deadlock - fixes #3259
Before this change we ran the tests and the mount in the same process.
This could cause deadlocks and often did, and made the mount tests
very unreliable.

This fixes the problem by running the mount in a separate process and
commanding it via a pipe over stdin/stdout.
2022-06-16 16:48:09 +01:00
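The underlying pattern - start a helper process and drive it over its stdin/stdout - looks roughly like this simplified sketch (not the actual mount test harness; it uses cat on a Unix-like system just to echo the command back):

    package main

    import (
        "bufio"
        "fmt"
        "os/exec"
    )

    func main() {
        // Start the child process with pipes attached to its stdin and stdout.
        cmd := exec.Command("cat")
        stdin, err := cmd.StdinPipe()
        if err != nil {
            panic(err)
        }
        stdout, err := cmd.StdoutPipe()
        if err != nil {
            panic(err)
        }
        if err := cmd.Start(); err != nil {
            panic(err)
        }

        // Send a command down the pipe and read the reply.
        fmt.Fprintln(stdin, "mount /tmp/mountpoint")
        reply, err := bufio.NewReader(stdout).ReadString('\n')
        if err != nil {
            panic(err)
        }
        fmt.Print("child replied: ", reply)

        stdin.Close()
        cmd.Wait()
    }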
Nick Craig-Wood
626a416ff8 vfs: factor out the VFS option initialization for re-use #3259 2022-06-16 16:48:09 +01:00
Nick Craig-Wood
6c832a72ee Add CrossR to contributors 2022-06-16 16:48:09 +01:00
Nick Craig-Wood
c390098262 Add Sven Gerber to contributors 2022-06-16 16:48:09 +01:00
Nick Craig-Wood
41f3ceb67d Add Art M. Gallagher to contributors 2022-06-16 16:48:09 +01:00
Nick Craig-Wood
592358148d Add m00594701 to contributors 2022-06-16 16:48:09 +01:00
buengese
93a25498cf docs/pcloud: document the cleanup issues 2022-06-16 15:45:42 +02:00
buengese
32f913ffbd pcloud: fix cleanup - fixes #3853 2022-06-16 15:45:42 +02:00
buengese
621c4ebe15 bin/make_backend_docs: allow generation of docs for just one backend 2022-06-16 15:45:32 +02:00
CrossR
0279bf3abb ncdu: implement multi selection
Co-authored-by: buengese <buengese@protonmail.com>
2022-06-14 13:57:35 +02:00
Sven Gerber
50c2e37aac onedrive: add access scopes option
By default, rclone always requests read and write permissions, no matter what settings you configure in the AAD application. This option allows you to explicitly request read-only permissions.

Migrated the read only option to the access scope option and set the disable_site_permission option to hidden.
2022-06-14 10:21:23 +01:00
Art M. Gallagher
6602e1a851 mega: document using MEGAcmd to help with login failures
Added extra subsection under Failure to log-in 

See: https://forum.rclone.org/t/30935
2022-06-14 09:55:40 +01:00
m00594701
02b4638a22 backend: add Huawei OBS to s3 provider list 2022-06-14 09:21:01 +01:00
albertony
ec117593f1 Fix lint issues reported by staticcheck
Used staticcheck 2022.1.2 (v0.3.2)

See: staticcheck.io
2022-06-13 21:13:50 +02:00
buengese
74bd7f3381 pcloud: fix about with no free space left 2022-06-13 20:40:15 +02:00
albertony
afa30abd33 mount: remove legacy OS X remnants 2022-06-13 18:06:38 +01:00
albertony
70d1d8d760 mount: replace deprecated fuse.ENOSYS with syscall.ENOSYS 2022-06-13 17:59:44 +01:00
albertony
5006ede266 mailru: use variable type int instead of time.Duration for keeping number of seconds
Fixes problem reported by staticcheck lint tool (v0.3.2):
Poorly chosen name for variable of type time.Duration (ST1011).
2022-06-13 17:42:24 +01:00
Roberto Ricci
0f41e91d41 cmd/ncdu: display correct path in delete confirmation dialog
If the remote on the command line is "remote:subdir", when
deleting "filename", the confirmation message shows the path
"remote:subdirfilename".
Using fspath.JoinRootPath() fixes this. Also use this function
and fs.ConfigString() in other parts of the file, since they
are more appropriate.
2022-06-13 17:40:59 +01:00
buengese
3a20929db4 uptobox: fix root path handling - fixes #5903 2022-06-12 22:36:46 +02:00
buengese
cee79f27ee zoho: add Japan and China regions 2022-06-12 15:37:30 +02:00
buengese
aeb5dc2892 docs/zoho: add a section explaining client_id setup 2022-06-12 15:32:31 +02:00
Nick Craig-Wood
cfe0911e0d sync: fix tests for overlapping with filter
In commit

3ccf222acb sync: overlap check is now filter-sensitive

The tests were attempting to write invalid objects on some backends
due to a leading / on the object name.

This fix also adds a few more test cases and makes sure the tests can
be run individually.
2022-06-09 14:26:43 +01:00
Nick Craig-Wood
7c1f2d7c84 dirtree: fix tests with -fast-list
In commit

da404dc0f2 sync,copy: Fix --fast-list --create-empty-src-dirs and --exclude

The fix caused DirTree.AddDir to be called with the root directory.
This in turn caused a spurious directory entry in the DirTree which
caused tests with the -fast-list flag to fail with directory not found
errors.
2022-06-09 14:26:43 +01:00
albertony
5db9a2f831 sftp: use vendor-specific VFS statistics extension for about if available
See #5763
2022-06-08 21:14:53 +02:00
albertony
b4091f282a sftp: add support for about and hashsum on windows server
Windows shells like cmd and powershell need to use different quoting/escaping
of strings and paths than the unix shell, and absolute paths must also be fixed
by removing the leading slash that POSIX formatted paths have
(e.g. /C:/Users does not work in shell, it must be converted to C:/Users).

Tries to autodetect shell type (cmd, powershell, unix) on first use.

Implemented default builtin powershell functions for hashsum and about when the
remote shell is powershell.

See #5763

Fixes #5758
2022-06-08 21:14:53 +02:00
albertony
218bf2183d docs: add missing backends from listing of optional features (Akamai, Koofr and Sia) 2022-06-08 20:47:58 +02:00
Nick Craig-Wood
bb6edb3c39 build: update dependencies
Also:

- azureblob: fix compile after API change in upstream library
2022-06-08 18:29:42 +01:00
Nick Craig-Wood
6c2331ffd7 mount: skip tests on CI even if >= 2 processors 2022-06-08 18:29:42 +01:00
Nick Craig-Wood
08a897424b Add Noah Hsu to contributors 2022-06-08 18:29:42 +01:00
albertony
acd7ad9190 config: adjust section headers to improve readability
See #6211
2022-06-08 19:24:38 +02:00
albertony
b246584a02 config: more readable listing of remote options
Differentiate output of 'config show remote' command from listing options as part
of interactive config process for consistency: 'config show remote' consistent with
'config show', while listing in interactive config consistent with other output.

See #6211
2022-06-08 19:24:38 +02:00
albertony
61a75bfe07 config: add empty line between sections to improve readability
See #6211
2022-06-08 19:24:38 +02:00
Noah Hsu
ef089dd867 webdav: add SharePoint in other specific regions support 2022-06-08 17:24:35 +01:00
albertony
e3d44612c1 jottacloud: error strings should not be capitalized 2022-06-08 17:56:37 +02:00
albertony
b2388f1294 jottacloud: refactor endpoint paths 2022-06-08 17:56:37 +02:00
albertony
a571c1fb46 jottacloud: refactor naming of different api urls 2022-06-08 17:56:37 +02:00
albertony
01340acad2 jottacloud: add support for upload to custom device and mountpoint
See #5926
2022-06-08 17:56:37 +02:00
albertony
700ca23a71 config: add utility function for backend config with list and custom input 2022-06-08 17:56:37 +02:00
albertony
f4f0e444bf filter: allow multiple --exclude-if-present flags - fixes #6219 2022-06-08 17:11:52 +02:00
albertony
20aaeba547 docs: clarify backend support for setting modtime only (#5638) 2022-06-08 16:29:35 +02:00
Nick Craig-Wood
4b358ff43b combine: backend to combine multiple remotes in one directory tree
Fixes #5600
2022-06-08 14:57:25 +01:00
Nick Craig-Wood
e58d75e4d7 drive: make backend config -o config add a combined AllDrives remote
This adjusts

    rclone backend drives -o config drive:

So that it also emits a config section called `AllDrives` which uses
the combine backend to make a backend which combines all the shared
drives into one.

It also makes sure that all the shared drive names are valid rclone
config names, deduplicating if necessary.

Fixes #4506
2022-06-08 14:57:25 +01:00
Nick Craig-Wood
fb58737142 fstests: check for wrapped errors in ListR test 2022-06-08 14:57:25 +01:00
Nick Craig-Wood
26db80c270 ftp: revert to upstream github.com/jlaffaye/ftp from our fork
...now all of our patches have been merged #5810
2022-06-08 11:58:32 +01:00
Nick Craig-Wood
9eb3470c9c Add Matthew Vernon to contributors 2022-06-08 11:58:32 +01:00
Nick Craig-Wood
a449dd7d1c Add Jason Zheng to contributors 2022-06-08 11:58:32 +01:00
Nick Craig-Wood
fc4fe33703 Add Nick to contributors 2022-06-08 11:58:32 +01:00
Matthew Vernon
e11bfacfcf docs: note use of regexp filtering prevents directory optimisation
Discussed in the forum:
https://forum.rclone.org/t/rclone-regex-in-filter-causes-spurious-directory-catch-all-filter/30985

Signed-off-by: Matthew Vernon <mvernon@wikimedia.org>
2022-06-07 17:59:34 +01:00
Jason Zheng
a9c49c50a0 ftp: add support for disable_utf8 option - fixes #6209 2022-06-01 19:09:37 +01:00
albertony
8979337313 Add Eric Wolf to contributors 2022-06-01 20:02:25 +02:00
Eric Wolf
7ffab5d998 docs: clarify use of verbosity environment variable (#6208)
RCLONE_VERBOSE does not use a true/false setting, instead using a 0,1,2 setting.
2022-06-01 20:00:43 +02:00
Nick
3ccf222acb sync: overlap check is now filter-sensitive
Previously, the overlap check was based on simple prefix checks of the source and destination paths. Now it actually checks whether the destination is excluded via any filter rule or an "--exclude-if-present" file.
2022-06-01 18:24:54 +01:00
Nick Craig-Wood
2781f8e2f1 gcs: Fix download of "Content-Encoding: gzip" compressed objects
Before this change, if an object compressed with "Content-Encoding:
gzip" was downloaded, a length and hash mismatch would occur since the
go runtime automatically decompressed the object on download.

This change erases the length and hash on compressed objects so they
can be downloaded successfully, at the cost of not being able to check
the length or the hash of the downloaded object.

This also adds the --gcs-download-compressed flag to allow the
compressed files to be downloaded as-is providing compressed objects
with intact size and hash information.

Fixes #2658
2022-05-31 12:10:21 +01:00
Nick Craig-Wood
3d55f69338 dropbox: add logs to show when poll interval limits are exceeded 2022-05-31 12:09:50 +01:00
Nick Craig-Wood
cc9bc2cb80 Add Andrey to contributors 2022-05-31 12:09:50 +01:00
Nick Craig-Wood
80ac59ee5b Add Rob Pickerill to contributors 2022-05-31 12:09:50 +01:00
m8rge
5d6a6dd6c0 dropbox: migrate from deprecated api
Change UploadSessionFinishBatch usage to UploadSessionFinishBatchV2. Change in sdk was made in https://github.com/dropbox/dropbox-sdk-go-unofficial/pull/106
2022-05-30 17:24:18 +01:00
Andrey
c676e2139d bisync: docs: add Yandex Disk as tested bisync backend 2022-05-30 17:10:25 +01:00
albertony
7361c98b2d Add Mr-Kanister to contributors 2022-05-27 10:34:58 +02:00
Mr-Kanister
5cc47de912 docs/bisync: fixed typo (#6196)
Fixed a little typo in /docs/content/bisync.md
2022-05-27 10:32:25 +02:00
Rob Pickerill
6d342a3c5b azureblob: case insensitive access tier 2022-05-24 09:19:08 +01:00
Nick Craig-Wood
336051870e build: add linux/arm/v6 to docker images - fixes #6192 2022-05-23 18:04:55 +01:00
Nick Craig-Wood
38c6d022bd Add Hugo Laloge to contributors 2022-05-23 18:04:55 +01:00
Hugo Laloge
c138367df6 onedrive: Implement --poll-interval for onedrive
Implement ChangeNotifier for onedrive.
Use drive delta queries to listen for modifications.
2022-05-23 11:30:43 +01:00
Nick Craig-Wood
da404dc0f2 sync,copy: Fix --fast-list --create-empty-src-dirs and --exclude
Before this change, if --fast-list was in use while doing a sync or
copy with --create-empty-src-dirs and --exclude excluded all the files
from the directory (but not the directory), then the directory would
not be created.

This is also visible with `rclone tree` which uses the same tree
building approach as `rclone sync --fast-list` where the directories
would go missing from the tree view.

This was caused by not adding the parents of excluded files to the
directory tree.

See: https://forum.rclone.org/t/create-empty-src-dirs-issue-with-b2/30856
2022-05-23 10:10:55 +01:00
Nick Craig-Wood
28e43fe7af Add SimonLiu to contributors 2022-05-23 10:10:55 +01:00
SimonLiu
3ec25f437b Update remote_setup.md
Add another option (utilizing an SSH tunnel) for Linux/macOS users to complete the auth on a headless box.
2022-05-19 14:13:38 +01:00
Alex JOST
a34276e9b3 s3: Add Warsaw location for Scaleway
Add new location in Warsaw (Poland) to endpoints for Scaleway.

More Information:
https://blog.scaleway.com/scaleway-is-now-in-warsaw/
https://www.scaleway.com/en/docs/storage/object/how-to/create-a-bucket/
2022-05-19 14:06:16 +01:00
Nick Craig-Wood
c2baacc0a4 union: fix uploading files to union of all bucket based remotes
Before this fix, if uploading to a union consisting of all bucket
based remotes (eg s3), uploads failed with:

    Failed to copy: object not found

This was because the union backend was relying on parent directories
being created to work out which files to upload. If all the upstreams
were bucket based backends which can't hold empty directories, no
directories were created and the upload failed.

This fixes the problem by returning the upstreams used when creating
the directory for the upload, rather than searching for them again
after they've been created.

This will also make the union backend a little more efficient.

Fixes #6170
2022-05-19 13:23:41 +01:00
Nick Craig-Wood
fcec4bedbe drive: fix 404 errors on copy/server side copy objects from public folder
Before this change, copying objects from a public folder shared with a
resource key failed with a 404 error.

This is because rclone wasn't supplying the resource Key where it
should have been.

After this change rclone adds the resource Key when trying to download
or server side copy an object.

There may be more places rclone needs to supply the resource key as
this is barely documented in the API documentation.

See: https://forum.rclone.org/t/copying-files-from-a-publicly-accessible-google-drive-folder-added-as-a-shortcut-getting-error-404-server-side-copies-are-disabled/30811
2022-05-18 18:12:19 +01:00
Nick Craig-Wood
813a5e0931 s3: Remove bucket ACL configuration for Cloudflare R2
Bucket ACLs are not supported by Cloudflare R2. All buckets are
private and must be shared using a Cloudflare Worker.
2022-05-17 15:57:09 +01:00
Nick Craig-Wood
bd4abb15a3 Add Derek Battams to contributors 2022-05-17 15:57:09 +01:00
Nick Craig-Wood
7f84283539 Add Erik van Velzen to contributors 2022-05-17 15:57:09 +01:00
albertony
47b1a0d6fa docs: improve guide for installing from source - fixes #5809 2022-05-17 12:11:20 +01:00
albertony
ce168ecac2 Configurable version suffix independent of version number 2022-05-17 12:10:01 +01:00
Eng Zer Jun
4f0ddb60e7 refactor: replace strings.Replace with strings.ReplaceAll
strings.ReplaceAll(s, old, new) is a wrapper function for
strings.Replace(s, old, new, -1). But strings.ReplaceAll is more
readable and removes the hardcoded -1.

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2022-05-17 11:08:37 +01:00
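A quick illustration of the equivalence:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        s := "a-b-c"
        // These two calls do the same thing; ReplaceAll avoids the hardcoded -1.
        fmt.Println(strings.Replace(s, "-", "_", -1)) // a_b_c
        fmt.Println(strings.ReplaceAll(s, "-", "_"))  // a_b_c
    }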
albertony
b929a56f46 Add Christian Galo to contributors 2022-05-16 13:01:55 +02:00
Christian Galo
74af6409d4 docs/rc: added missing global flags (#6083) 2022-05-16 13:00:19 +02:00
albertony
0e77072dcc vfs: error strings should not be capitalized 2022-05-16 12:43:43 +02:00
albertony
2437eb3cce vfs: fix incorrect detection of root in parent directory utility function
When using filepath.Dir, a difference from path.Dir is that it returns os.PathSeparator
instead of slash when the path consists entirely of separators.

Also fixed casing of the function name, use OS in all caps instead of Os
as recommended here: https://github.com/golang/go/wiki/CodeReviewComments#initialisms
2022-05-16 12:43:43 +02:00
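The difference can be seen with a path made entirely of separators (output depends on the platform):

    package main

    import (
        "fmt"
        "path"
        "path/filepath"
    )

    func main() {
        // path always uses forward slashes.
        fmt.Println(path.Dir("///")) // "/"

        // filepath uses the OS separator, so on Windows this prints `\`
        // while on Unix it prints "/".
        fmt.Println(filepath.Dir("///"))
    }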
Michael C Tiernan - MIT-Research Computing Project
a12c94caff docs: replaced extended ascii smart quotes (#6171) 2022-05-16 12:34:54 +02:00
Kaspian
542c1616b8 docs: small style fixes 2022-05-13 23:06:45 +01:00
albertony
8697f0bd26 about: improved error message 2022-05-13 12:08:10 +01:00
albertony
a9f18f8093 Set proper exit code for errors that are not low-level retried (e.g. size/timestamp changing)
Fixes #5785
2022-05-13 12:02:55 +01:00
Derek Battams
8e5e230b81 b2: use chunksize lib to determine chunksize dynamically
Fixes #4643
2022-05-13 09:25:48 +01:00
Derek Battams
c0985e93b7 azureblob: use chunksize lib to determine chunksize dynamically 2022-05-13 09:25:48 +01:00
Derek Battams
fb4f7555c7 s3: use chunksize lib to determine chunksize dynamically 2022-05-13 09:25:48 +01:00
Derek Battams
f2e7a2e794 chunksize: initial implementation of chunksize helper lib 2022-05-13 09:25:48 +01:00
Erik van Velzen
9e4854955c storj: fix put
The "relative" argument was missing when Put'ing a file. This
sets an incorrect object entry in the cache, leading to the file being
unreadable when using mount functionality.

Fixes #6151
2022-05-12 20:43:54 +01:00
Vincent Murphy
319ac225e4 s3: backend restore command to skip non-GLACIER objects 2022-05-12 20:42:37 +01:00
albertony
a9d3283d97 jottacloud: fix listing output of remote with special characters
This fixes the failing integration test: TestIntegration/FsMkdir/FsPutFiles/FsIsFile
2022-05-12 20:41:07 +01:00
Nick Craig-Wood
edf0412464 test_all: correct path for Internet Archive test
This is to make it use the rclone project's account and to remove the
/ in the remote name which was crashing the integration tester!
2022-05-12 20:38:05 +01:00
Nick Craig-Wood
e6194a4b83 Add Hugal31 to contributors 2022-05-12 20:38:05 +01:00
Nick Craig-Wood
7f05990623 Add Werner to contributors 2022-05-12 20:38:05 +01:00
Nick Craig-Wood
e16f2a566f Add Kaspian to contributors 2022-05-12 20:38:05 +01:00
Hugal31
a36fef8a66 rclone.mount: ignore _netdev mount argument - Fixes #5808
Do not trigger an error when parsing arguments starting with underscores.

_netdev was already ignored after parsing.
2022-05-12 20:27:13 +01:00
Werner
6500e1d205 docs: bisync: minor grammar fix 2022-05-12 20:24:52 +01:00
Kaspian
9f7484e4e9 docs: faq: small grammar fix 2022-05-12 20:23:48 +01:00
Michael C Tiernan - MIT-Research Computing Project
0ba702ccf4 install: Pre-verify sudo authorization "-v" before calling curl.
As it was, the curl call interfered with the bash invocation; authenticating sudo first avoids this.
2022-05-12 20:19:33 +01:00
Nick Craig-Wood
6f91198b57 s3: Support Cloudflare R2 - fixes #5642 2022-05-12 08:49:20 +01:00
Nick Craig-Wood
cf0a72aecd fs: fix FixRangeOption to do nothing on unknown sized objects
FixRangeOption shouldn't be called on an object of unknown size, but
if it is, make sure it does nothing.

See: #5642
2022-05-12 08:49:20 +01:00
Nick Craig-Wood
f6fd6ee777 fs: fix FixRangeOption make SeekOptions into absolute RangeOptions
Cloudflare R2 doesn't support range options like `Range: bytes=21-`.

This patch makes FixRangeOption turn a SeekOption into an absolute
RangeOption like this `Range: bytes=21-25` to interoperate with R2.

See: #5642
2022-05-12 08:49:20 +01:00
Nick Craig-Wood
1e66d052fd fs: fix FixRangeOption to make fetch to end Range options absolute
Before this change FixRangeOption was leaving `Range: bytes=21-`
alone, thus not fulfilling its contract of making Range requests
absolute.

As it happens this form isn't supported by Cloudflare R2.

After this change the request is normalised to `Range: bytes=21-25`.

See: #5642
2022-05-12 08:49:20 +01:00
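The transformation itself is straightforward: given the object size, an open-ended range such as `Range: bytes=21-` is rewritten with an explicit end offset. The sketch below illustrates the idea only and is not the actual FixRangeOption code:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // absoluteRange rewrites an open-ended range like "bytes=21-" into an
    // absolute one like "bytes=21-25" using the known object size. It only
    // handles the "start-" form, which is all this sketch needs.
    func absoluteRange(rangeValue string, size int64) string {
        const prefix = "bytes="
        spec := strings.TrimPrefix(rangeValue, prefix)
        if !strings.HasSuffix(spec, "-") {
            return rangeValue // already absolute, or a form not handled here
        }
        start, err := strconv.ParseInt(strings.TrimSuffix(spec, "-"), 10, 64)
        if err != nil || size <= 0 {
            return rangeValue // unparsable or unknown size: leave it alone
        }
        return fmt.Sprintf("%s%d-%d", prefix, start, size-1)
    }

    func main() {
        fmt.Println(absoluteRange("bytes=21-", 26)) // bytes=21-25
    }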
Nick Craig-Wood
e5974ac4b0 s3: use PutObject from the aws SDK to upload single part objects
Before this change rclone used presigned requests to upload single
part objects. This was because of a limitation in the SDK which didn't
allow non-seekable io.Readers to be passed in.

This is incompatible with some S3 backends, and rclone wasn't adding
the `X-Amz-Content-Sha256: UNSIGNED-PAYLOAD` header which was
incompatible with other S3 backends.

The SDK now allows for this so rclone can use PutObject directly.

This sets the `X-Amz-Content-Sha256: UNSIGNED-PAYLOAD` flag on the PUT
request. However rclone will add a `Content-MD5` header if at all
possible so the body data is still protected.

Note that the old behaviour can still be configured if required with
the `use_presigned_request` config parameter.

Fixes #5422
2022-05-12 08:49:20 +01:00
Nick Craig-Wood
50a0c3482d lib/readers: add FakeSeeker to adapt io.Reader to io.ReadSeeker #5422 2022-05-12 08:49:20 +01:00
Nick Craig-Wood
389a29b017 Add Michael C Tiernan - MIT-Research Computing Project to contributors 2022-05-12 08:49:20 +01:00
Nick Craig-Wood
9dcf9375e8 Add Mateusz Puczyński to contributors 2022-05-12 08:49:20 +01:00
Lesmiscore
1d6d41fb91 backend/internetarchive: fix uploads can take very long time
* fill in empty values for non-wait mode
* add tracking metadata to observe file change
* completely remove getHashes
* remove unreliable update tests

Closes #6150
2022-05-07 21:54:14 +01:00
Michael C Tiernan - MIT-Research Computing Project
a3d4307892 install: set the modes on the files and/or directories on macOS
Changes made for macOS, specific to that style of system.
Paths are established/defined in one place and modes are set automatically
when created. (Platform specific.)
2022-05-05 17:50:22 +01:00
ehsantdy
a446106041 s3: update Arvancloud default values and correct docs 2022-05-02 16:04:01 +01:00
Mateusz Puczyński
607172b6ec go: run go mod tidy and set version to minimum supported go - go1.16
This fixes `go mod tidy` needing manual intervention to build with
previous go versions.
2022-05-01 13:28:20 +01:00
Nick Craig-Wood
94757277bc build: fix go mod tidy removing golang.org/x/mobile dependency
Before this running `go mod tidy` caused the build to break because it
removed the dependency on golang.org/x/mobile and a command line tool
from this package is needed for the build.

This adds an explicit dependency which will mean the tool is always present.
2022-05-01 13:28:20 +01:00
Nick Craig-Wood
deab86867c build: support mount on windows/arm64 - all windows binaries now not cgo
This builds all windows binaries without CGO but with cmount.

cgofuse has a compile mode which works without CGO on Windows for
amd64/x86/arm64 architectures so switch to using that.
2022-04-29 18:04:21 +01:00
Nick Craig-Wood
c0c5b3bc6b build: add rclone version step 2022-04-29 18:04:21 +01:00
Nick Craig-Wood
a947f298e6 cmount: remove cgo from windows build requirements since it builds without 2022-04-29 18:04:21 +01:00
Nick Craig-Wood
1b0128ecb2 cmount: update winfsp/cgofuse dependency for windows/arm64 build 2022-04-29 18:04:21 +01:00
Nick Craig-Wood
c5395db1f1 Changelog updates from Version v1.58.1 2022-04-29 13:11:23 +01:00
Nick Craig-Wood
6e5382fc99 Add SwazRGB to contributors 2022-04-29 13:11:23 +01:00
Nick Craig-Wood
134592adaa Add ehsantdy to contributors 2022-04-29 13:11:23 +01:00
Nick Craig-Wood
36e614f550 Add Lesmiscore to contributors 2022-04-29 13:11:23 +01:00
Nick Craig-Wood
7bfed98b48 Add Zsolt Ero to contributors 2022-04-29 13:11:23 +01:00
Nick Craig-Wood
f471096fd0 Add Leroy van Logchem to contributors 2022-04-29 13:11:23 +01:00
SwazRGB
4cebade95d b2: Add b2-version-at flag to show file versions at time
Uses b2_list_file_versions to retrieve all file versions, and returns
the one that was active at the specified time

This is especially useful in combination with other backup tools, such
as restic, which may use rclone as a backend.
2022-04-28 16:29:13 +01:00
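An illustrative invocation (the exact set of accepted time formats is whatever fs.Time parses, as described in the commit that follows) might be listing a bucket as it was on a given date:

    rclone ls --b2-version-at 2022-04-01 b2:bucket/path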
SwazRGB
a8cd18faf3 fs: Implement fs.Time
Similar to fs.Duration but parses into a timestamp instead

Supports parsing from:

* Any of the date formats in parseTimeDates
* A time.Duration offset from now
* parseDurationSuffixes offset from now
2022-04-28 16:29:13 +01:00
ehsantdy
e34c543660 s3: Add ArvanCloud AOS to provider list 2022-04-28 10:42:30 +01:00
Lesmiscore
598364ad0f backend/internetarchive: add support for Internet Archive
This adds support for Internet Archive (archive.org) Items.
2022-04-28 10:25:38 +01:00
Zsolt Ero
211dbe9aee docs: Add --multi-thread-streams note to --transfers. 2022-04-27 18:40:39 +01:00
albertony
4829527dac jottacloud: refactor timestamp handling
Jottacloud have several different apis and endpoints using a mix of different
timestamp formats. In existing code the list operation (after the recent liststream
implementation) uses format from golang's time.RFC3339 constant. Uploads (using the
allocate api) uses format from a hard coded constant with value identical to golang's
time.RFC3339. And then we have the classic JFS time format, which is similar to RFC3339
but not identical, using a different constant. Also the naming is a bit confusing,
since the term api is used both as a generic term and also as a reference to the
newer format used in the api subdomain where the allocate endpoint is located.
This commit refactors these things a bit.
2022-04-27 13:41:39 +02:00
albertony
cc8dde402f jottacloud: refactor SetModTime function
Now using the utility function for deduplication that was newly implemented to
fix an issue with server-side copy. This function uses the original, and generic,
"jfs" api (and its "cphash" feature), instead of the newer "allocate" api dedicated
for uploads. Both apis support similar deduplication functionality that we rely on for
the SetModTime operation. One advantage of using the jfs variant is that the allocate
api is specialized for uploads, an initial request performs modtime-only changes and
deduplication if possible but if not possible it creates an incomplete file revision
and returns a special url to be used with a following request to upload missing content.
In the SetModTime function we only sent the first request, using metadata from the existing
remote file but different timestamps, which led to a modtime-only change. If, for some
reason, this should fail it would leave the incomplete revision behind. Probably not
a problem, but the jfs implementation used with this commit is simpler and
a more "standalone" request which either succeeds or fails without expecting additional
requests.
2022-04-27 12:06:36 +02:00
albertony
2b67ad17aa jottacloud: fix issue with server-side copy when destination is in trash
A strange feature (probably bug) in the api used by the server-side copy implementation
in Jottacloud backend is that if the destination file is in trash, the copy request
succeeds but the destination will still be in trash! When this situation occurs in
rclone, the copy command will fail with "Failed to copy: object not found" because
rclone verifies that the file info in the response from the copy request is valid,
and since it is marked as deleted it is treated as invalid.

This commit works around this problem by looking for this situation in the response
from the copy operation, and send an additional request to a built-in deduplication
endpoint that will restore the file from trash.

Fixes #6112
2022-04-27 12:06:36 +02:00
albertony
6da3522499 jottacloud: minor cleanup of upload response
The UploadResponse type included several properties that are no longer returned
by Jottacloud, and the backend implementation did not use them anyway.
2022-04-27 08:40:34 +02:00
albertony
97606bbdef ncdu: refactor accumulated attributes into a struct 2022-04-26 21:12:52 +02:00
albertony
a15885dd74 docs/ncdu: document flag prefixes 2022-04-26 21:12:52 +02:00
albertony
87c201c92a ncdu: fix issue where dir size is summed when file sizes are -1
Some backends may not provide size for all objects, and instead
return -1. The existing version included these in directory sums,
with strange results. With this commit rclone ncdu will consider
negative sizes as zero, but add a new prefix flag '~' with a
description that indicates the shown size is inaccurate.

Fixes #6084
2022-04-26 21:12:52 +02:00
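The accumulation rule described here is simple enough to sketch (hypothetical code, not the actual ncdu implementation):

    package main

    import "fmt"

    // addFile adds a file size to a directory total, treating unknown sizes
    // (reported as -1) as zero and remembering that the total is now only a
    // lower bound, which ncdu signals to the user with a '~' prefix.
    func addFile(total *int64, inaccurate *bool, size int64) {
        if size < 0 {
            *inaccurate = true
            size = 0
        }
        *total += size
    }

    func main() {
        var total int64
        var inaccurate bool
        for _, size := range []int64{100, -1, 50} {
            addFile(&total, &inaccurate, size)
        }
        fmt.Println(total, inaccurate) // 150 true
    }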
albertony
d77736c21a docs/size: extend documentation of size command 2022-04-26 19:37:15 +02:00
albertony
86bd5f6922 size: warn about inaccurate results when objects have unknown size 2022-04-26 19:37:15 +02:00
albertony
fe271a4e35 docs/drive: mention that google docs count as empty files in directory totals 2022-04-26 19:34:37 +02:00
Leroy van Logchem
75455d4000 azureblob: Calculate Chunksize/blocksize to stay below maxUploadParts 2022-04-26 17:37:40 +01:00
Nick Craig-Wood
82e24f521f webdav: don't override Referer if user sets it - fixes #6040 2022-04-26 08:58:31 +01:00
Nick Craig-Wood
5605e34f7b mount: fix --devname and fusermount: unknown option 'fsname' when mounting via rc
In this commit

f4c40bf79d mount: add --devname to set the device name sent to FUSE for mount display

The --devname parameter was added. However it was soon noticed that
attempting to mount via the rc gave this error:

    mount helper error: fusermount: unknown option 'fsname'
    mount FAILED: fusermount: exit status 1

This was because the DeviceName (and VolumeName) parameter was never
being initialised when the mount was called via the rc.

The fix for this was to refactor the rc interface so it called the
same Mount method as the command line mount which initialised the
DeviceName and VolumeName parameters properly.

This also fixes the cmd/mount tests which were breaking in the same
way but since they aren't normally run on the CI we didn't notice.

Fixes #6044
2022-04-25 12:17:25 +01:00
Nick Craig-Wood
06598531e0 vfs: remove wording which suggests VFS is only for mounting
See: https://forum.rclone.org/t/solved-rclone-serve-read-only/30377/2
2022-04-25 12:17:25 +01:00
albertony
b1d43f8d41 jottacloud: fix scope in token request
The existing code in rclone set the value "offline_access+openid",
which when encoded in the body becomes "offline_access%2Bopenid". I think
this is wrong. Probably an artifact of "double urlencoding" mixup -
either in rclone or in the jottacloud cli tool version it was sniffed
from? It does work, though. The token received will have scopes "email
offline_access" in it, and the same is true if I change to only
sending "offline_access" as scope.

If a proper space delimited list of "offline_access openid"
is used in the request, the response also includes openid scope:
"openid email offline_access". I think this is more correct and this
patch implements this.

See: #6107
2022-04-22 12:52:00 +01:00
Sơn Trần-Nguyễn
b53c38c9fd fs/rc/js: correct RC method names 2022-04-22 12:44:04 +01:00
Nick Craig-Wood
03715f6c6b docs: add encoded characters to encoding table 2022-04-21 12:22:04 +01:00
Nick Craig-Wood
07481396e0 lib/encoder: add Semicolon encoding 2022-04-21 12:02:27 +01:00
Nick Craig-Wood
bab91e4402 putio: ignore URL encoded files as these fail in the integration tests 2022-04-15 17:57:15 +01:00
Nick Craig-Wood
fde40319ef koofr: remove digistorage from integration tests as no account 2022-04-15 17:57:15 +01:00
Nick Craig-Wood
94e330d4fa onedrive: remove onedrive China from integration tests as we no longer have an account 2022-04-15 17:57:15 +01:00
Nick Craig-Wood
087543d723 sftp: ignore failing entries in rsync.net integration tests 2022-04-15 17:57:15 +01:00
Nick Craig-Wood
6a759d936a storj: fix bucket creation on Move picked up by integration tests 2022-04-15 17:57:15 +01:00
Nick Craig-Wood
7c31240bb8 Add Nick Gooding to contributors 2022-04-15 17:57:15 +01:00
Nick Gooding
25146b4306 googlecloudstorage: add --gcs-no-check-bucket to minimise transactions and perms
Adds a configuration option to the GCS backend to allow skipping the
check if a bucket exists before copying an object to it, much like
f406dbb added for S3.
2022-04-14 11:18:36 +01:00
Nick Craig-Wood
240561850b test makefiles: add --chargen flag to make ascii chargen files 2022-04-13 23:07:56 +01:00
Nil Alexandrov
39a1e37441 netstorage: add support contacts to netstorage doc 2022-04-13 23:07:21 +01:00
Nick Craig-Wood
4c02f50ef5 build: update github.com/billziss-gh to github.com/winfsp 2022-04-13 10:18:26 +01:00
Nick Craig-Wood
f583b86334 test makefiles: fix crash if --min-file-size <= --max-file-size 2022-04-12 13:45:20 +01:00
Nick Craig-Wood
118e8e1470 test makefiles: add --sparse, --zero, --pattern and --ascii flags 2022-04-12 13:45:20 +01:00
Nick Craig-Wood
afcea9c72b test makefile: implement new test command to write a single file 2022-04-12 12:57:16 +01:00
Nick Craig-Wood
27176cc6bb config: use os.UserCacheDir from go stdlib to find cache dir #6095
When this code was originally implemented os.UserCacheDir wasn't
public so this used a copy of the code. This commit replaces that now
out of date copy with a call to the now public stdlib function.
2022-04-11 11:44:15 +01:00
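For reference, the stdlib call is a one-liner and picks the platform-appropriate directory:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        // os.UserCacheDir returns e.g. $XDG_CACHE_HOME or ~/.cache on Linux,
        // ~/Library/Caches on macOS and %LocalAppData% on Windows.
        dir, err := os.UserCacheDir()
        if err != nil {
            panic(err)
        }
        fmt.Println(filepath.Join(dir, "rclone"))
    }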
Nick Craig-Wood
f1e4b7da7b Add Adrien Rey-Jarthon to contributors 2022-04-11 11:44:15 +01:00
albertony
f065a267f6 docs: fix some links to command pages 2022-04-07 15:50:41 +02:00
Adrien Rey-Jarthon
17f8014909 docs: Note that Scaleway C14 is deprecating SFTP in favor of S3
This updates the documentation to reflect that the new C14 Cold Storage API
works with S3 and not with SFTP any more.

See: https://github.com/rclone/rclone/issues/1080#issuecomment-1082088870
2022-04-05 11:11:52 +01:00
Nick Craig-Wood
8ba04562c3 build: update android go build to 1.18.x and NDK to 23.1.7779620 2022-04-04 20:35:17 +01:00
Nick Craig-Wood
285747b1d1 build: update to go1.18 and make go1.16 the minimum required version 2022-04-04 20:35:17 +01:00
Nick Craig-Wood
7bb8b8f4ba cache: fix bug after golang.org/x/time/rate update
Before this change the cache backend was passing -1 into
rate.NewLimiter to mean unlimited transactions per second.

In a recent update this immediately returns a rate limit error as
might be expected.

This patch uses rate.Inf as indicated by the docs to signal no limits
are required.
2022-04-04 20:35:17 +01:00
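A minimal illustration of the difference (using golang.org/x/time/rate directly, not the cache backend's code):

    package main

    import (
        "context"
        "fmt"

        "golang.org/x/time/rate"
    )

    func main() {
        // rate.Inf means "no limit": Wait and Allow always succeed, whereas
        // passing -1 as the limit, as the old code did, now results in an
        // immediate rate limit error.
        limiter := rate.NewLimiter(rate.Inf, 0)

        for i := 0; i < 3; i++ {
            if err := limiter.Wait(context.Background()); err != nil {
                panic(err)
            }
            fmt.Println("transaction", i)
        }
    }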
Nick Craig-Wood
59c242bbf6 build: update dependencies
Also:

- dropbox: fix compile after API change in upstream library
2022-04-04 20:35:17 +01:00
Nick Craig-Wood
a2bacd7d3f Add rafma0 to contributors 2022-04-04 20:35:17 +01:00
Nick Craig-Wood
9babcc4811 Add GH to contributors 2022-04-04 20:35:17 +01:00
Nick Craig-Wood
a0f665ec3c Add KARBOWSKI Piotr to contributors 2022-04-04 20:35:17 +01:00
Nick Craig-Wood
ecdf42c17f Add Tobias Klauser to contributors 2022-04-04 20:35:17 +01:00
rafma0
be9ee1d138 putio: fix multithread download and other ranged requests
Before this change the 206 responses from putio Range requests were being
returned as errors.

This change checks for 200 and 206 in the GET response now.
2022-04-04 11:15:55 +01:00
GH
9e9ead2ac4 onedrive: note that sharepoint also changes web files (.html, .aspx) 2022-04-03 12:43:23 +01:00
KARBOWSKI Piotr
4f78226f8b sftp: Fix OpenSSH 8.8+ RSA keys incompatibility (#6076)
Updates golang.org/x/crypto to v0.0.0-20220331220935-ae2d96664a29.

Fixes the issues with connecting to OpenSSH 8.8+ remotes in case the
client uses RSA key pair due to OpenSSH dropping support for SHA1 based
ssh-rsa signature.

Bug: https://github.com/rclone/rclone/issues/6076
Bug: https://github.com/golang/go/issues/37278
Signed-off-by: KARBOWSKI Piotr <piotr.karbowski@gmail.com>
2022-04-01 12:49:39 +01:00
Tobias Klauser
54c9c3156c fs/config, lib/terminal: use golang.org/x/term
golang.org/x/crypto/ssh/terminal is deprecated in favor of
golang.org/x/term, see https://pkg.go.dev/golang.org/x/crypto/ssh/terminal

The latter also supports ReadPassword on solaris, so enable the
respective functionality in fs/config for solaris as well.
2022-04-01 12:48:18 +01:00
Nick Craig-Wood
6ecbbf796e netstorage: make levels of headings consistent 2022-03-31 18:11:37 +01:00
Nick Craig-Wood
603e51c43f s3: sync providers in config description with providers 2022-03-31 17:55:54 +01:00
Nick Craig-Wood
ca4671126e Add Berkan Teber to contributors 2022-03-31 17:55:54 +01:00
Berkan Teber
6ea26b508a putio: handle rate limit errors
For rate limit errors, "x-ratelimit-reset" header is now respected.
2022-03-30 12:25:53 +01:00
Nick Craig-Wood
887cccb2c1 filter: fix timezone of --min-age/-max-age from UTC to local as documented
Before this change if the timezone was omitted in a
--min-age/--max-age time specifier then rclone defaulted to a UTC
timezone.

This is documented as using the local timezone if the time zone
specifier is omitted which is a much more useful default and this
patch corrects the implementation to agree with the documentation.

See: https://forum.rclone.org/t/problem-utc-windows-europe-1-summer-problem/29917
2022-03-28 11:47:27 +01:00
Nick Craig-Wood
d975196cfa dropbox: fix retries of multipart uploads with incorrect_offset error
Before this fix, rclone retries chunks of multipart uploads. However
if they had been partially received dropbox would reply with an
incorrect_offset error which rclone was ignoring.

This patch parses the new offset from the error response and uses it
to adjust the data that rclone sends so it is the same as what dropbox
is expecting.

See: https://forum.rclone.org/t/dropbox-rate-limiting-for-upload/29779
2022-03-25 15:39:01 +00:00
Nick Craig-Wood
1f39b28f49 googlecloudstorage: use the s3 pacer to speed up transactions
This commit switches Google Cloud Storage from the drive pacer to the
s3 pacer. The main difference between them is that the s3 pacer does
not limit transactions in the non-error case. This is appropriate for
a cloud storage backend where you pay for each transaction.
2022-03-25 15:28:59 +00:00
Nick Craig-Wood
2738db22fb pacer: default the Google pacer to a burst of 100 to fix gcs pacing
Before this change the pacer defaulted to a burst of 1 which meant that
it kept being activated unnecessarily.

This affected Google Cloud Storage and Google Photos.

See: https://forum.rclone.org/t/no-traverse-too-slow-with-lot-of-files/29886/12
2022-03-25 15:28:59 +00:00
Nick Craig-Wood
1978ddde73 Add GuoXingbin to contributors 2022-03-25 15:28:59 +00:00
GuoXingbin
c2bfda22ab s3: Add ChinaMobile EOS to provider list
China Mobile Ecloud Elastic Object Storage (EOS) is a cloud object storage service, and is fully compatible with S3.

Fixes #6054
2022-03-24 11:57:00 +00:00
Nick Craig-Wood
d4da9b98d6 vfs: add --vfs-fast-fingerprint for less accurate but faster fingerprints 2022-03-22 16:33:24 +00:00
Nick Craig-Wood
e4f5912294 azureblob: fix lint error with golangci-lint 1.45.0 2022-03-22 16:33:24 +00:00
Nick Craig-Wood
750fffdf71 netstorage: fix unescaped HTML in documentation 2022-03-18 14:40:12 +00:00
Nick Craig-Wood
388e74af52 Start v1.59.0-DEV development 2022-03-18 14:04:22 +00:00
Nick Craig-Wood
f9354fff2f Version v1.58.0 2022-03-18 12:29:54 +00:00
Nick Craig-Wood
ff1f173fc2 build: add bisync.md to docs builder and fix missing tardigrade.md stub 2022-03-18 11:22:23 +00:00
Nick Craig-Wood
f8073a7b63 build: ensure the Go version used for the build is always up to date #6020 2022-03-17 17:14:50 +00:00
Nick Craig-Wood
807f1cedaa hasher: fix crash on object not found
Before this fix `NewObject` could return a wrapped `fs.Object(nil)`
which caused a crash. This was caused by `wrapObject` returning a
`nil` `*Object` which was cast into an `fs.Object`.

This changes the interface of `wrapObject` so it returns an
`fs.Object` instead of a `*Object` and an error which must be checked.
This forces the callers to return a `nil` object rather than an
`fs.Object(nil)`.

See: https://forum.rclone.org/t/panic-in-hasher-when-mounting-with-vfs-cache-and-not-synced-data-in-the-cache/29697/11
2022-03-16 11:30:26 +00:00
Nick Craig-Wood
bf9c68c88a storj: implement server side Move 2022-03-14 15:44:56 +00:00
Nick Craig-Wood
189cba0fbe s3: add other regions for Lyve and correct Provider name 2022-03-14 15:43:35 +00:00
Nick Craig-Wood
69f726f16c Add Nil Alexandrov to contributors 2022-03-14 15:43:35 +00:00
Nil Alexandrov
65652f7a75 Add Akamai Netstorage as a new backend. 2022-03-09 12:42:22 +00:00
Nil Alexandrov
47f9ab2f56 lib/rest: add support for setting trailers 2022-03-09 12:42:22 +00:00
Nick Craig-Wood
5dd51e6149 union: fix deadlock when one part of a multi-upload fails
Before this fix, rclone would deadlock when uploading two files at
once, if one errored. This caused the other file to block in the multi
reader and never complete.

This fix drains the input buffer on error which allows the other
upload to complete.

See: https://forum.rclone.org/t/union-with-create-policy-all-copy-stuck-when-first-union-fails/29601
2022-03-09 11:30:55 +00:00
Nick Craig-Wood
6a6d254a9f s3: add support for Seagate Lyve Cloud storage 2022-03-09 11:30:55 +00:00
jaKa
fd453f2c7b koofr: renamed digistorage to exclude the romania part. 2022-03-08 22:39:23 +00:00
jaKa
5d06a82c5d koofr: add digistorage service as a koofr provider. 2022-03-08 10:36:18 +00:00
Nick Craig-Wood
847868b4ba ftp: hard fork github.com/jlaffaye/ftp to fix go get
Having a replace directive in go.mod causes "go get
github.com/rclone/rclone" to fail as it discussed in this Go issue:
https://github.com/golang/go/issues/44840

This is apparently how the Go team want go.mod to work, so this commit
hard forks github.com/jlaffaye/ftp into github.com/rclone/ftp so we
can remove the `replace` directive from the go.mod file.

Fixes #5810
2022-03-07 09:55:49 +00:00
Ivan Andreev
38ca178cf3 mailru: fix int32 overflow on arm32 - fixes #6003 2022-03-06 13:33:57 +00:00
Nick Craig-Wood
9427d22f99 Add ctrl-q to contributors 2022-03-06 13:33:26 +00:00
ctrl-q
7b1428a498 onedrive: Do not retry on 400 pathIsTooLong 2022-03-06 13:05:05 +00:00
Nick Craig-Wood
ec72432cec vfs: fix failed to _ensure cache internal error: downloaders is nil error
This error was caused by renaming an open file.

When the file was renamed in the cache, the downloaders were cleared,
however the downloaders were not re-opened when needed again, instead
this error was generated.

This fix re-opens the downloaders if they have been closed by renaming
the file.

Fixes #5984
2022-03-03 17:43:29 +00:00
Nick Craig-Wood
2339172df2 pcloud: fix pre-1970 time stamps - fixes #5917
Before this change rclone sent pre-1970 timestamps as negative
numbers. pCloud ignores these and sets them to today's date.

This change sends the timestamps as unsigned 64 bit integers (which is
how the binary protocol sends them) and pCloud accepts the (actually
negative) timestamp like this.
2022-03-03 17:18:40 +00:00
Nick Craig-Wood
268b808bf8 filter: add {{ regexp }} syntax to pattern matches - fixes #4074
There has been a desire from more advanced rclone users to have regexp
filtering as well as the glob filtering.

This patch adds regexp filtering using this syntax `{{ regexp }}`
which is currently a syntax error, so is backwards compatible.

This means regexps can be used everywhere globs can be used, and that
they also can be mixed with globs in the same pattern, eg `*.{{jpe?g}}`
2022-03-03 17:16:28 +00:00
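For example, mixing a regexp with a glob in a single pattern to match both .jpg and .jpeg files could look like:

    rclone ls remote: --include "*.{{jpe?g}}"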
Nick Craig-Wood
74898bac3b build: add windows/arm64 build - NB this does not support mount yet #5828 2022-03-03 17:13:32 +00:00
Nick Craig-Wood
e0fbca02d4 compress: fix memory leak - fixes #6013
Before this change we forgot to close the compressor when checking to
see if an object was compressible.
2022-03-03 17:10:21 +00:00
Nick Craig-Wood
21355b4208 sync: Fix --max-duration so it doesn't retry when the duration is exceeded
Before this change, if the --max-duration limit was reached then
rclone would retry the sync as a fatal error wasn't raised.

This checks the deadline and raises a fatal error if necessary at the
end of the sync.

Fixes #6002
2022-03-03 17:08:16 +00:00
Nick Craig-Wood
251b84ff2c sftp: fix unnecessary seeking when uploading and downloading files
This stops the SFTP library issuing out of order writes which fixes
the problems uploading to `serve sftp` from the `sftp` backend.

This was fixed upstream in this pull request: https://github.com/pkg/sftp/pull/482

Fixes #5806
2022-03-03 17:02:35 +00:00
Nick Craig-Wood
537b62917f s3: add --s3-use-multipart-etag provider quirk #5993
Before this change the new multipart upload ETag checking code was
failing in the integration tests with Alibaba OSS.

Apparently Alibaba calculate the ETag in a different way to AWS.

This introduces a new provider quirk with a flag to disable the
checking of the ETag for multipart uploads.

Multipart ETag checking has been enabled for all providers that we can
test and which work, and left disabled for the others.
2022-03-01 16:36:39 +00:00
Nick Craig-Wood
71a784cfa2 compress: fix crash if metadata upload failed - fixes #5994
Before this change the backend attempted to delete a nil object if
the metadata upload failed.
2022-02-28 19:47:52 +00:00
Nick Craig-Wood
8ee0fe9863 serve docker: disable linux tests in CI as they are locking up regularly 2022-02-28 18:01:47 +00:00
Nick Craig-Wood
8f164e4df5 s3: Use the ETag on multipart transfers to verify the transfer was OK
Before this rclone ignored the ETag on multipart uploads which missed
an opportunity for a whole file integrity check.

This adds that check which means that we now check even harder that
multipart uploads have arrived properly.

See #5993
2022-02-25 16:19:03 +00:00
Nick Craig-Wood
06ecc6511b drive: when using a link type --drive-export-formats show all doc types
Before this change we always hid unexportable document types (eg
Google maps).

After this change, if using --drive-export-formats
url/desktop/link.html/webloc we will show links for all documents,
whether they are exportable or not, as the links to them work either way.

See: https://forum.rclone.org/t/rclone-mount-for-google-drive-does-not-show-as-web-links-the-google-documents-of-the-google-my-map-gmap-type/29415
2022-02-25 16:08:11 +00:00
Nick Craig-Wood
3529bdec9b sftp: update docs on how to create known_hosts file
This also removes the note on the limitation that only one entry per
host is allowed in the file as it works with many entries provided
they have different key types.

See: https://forum.rclone.org/t/rclone-fails-ssh-handshakes-with-rsync-nets-sftp-when-a-known-hosts-file-is-specified/29206/
2022-02-25 16:08:11 +00:00
partev
486b43f8c7 doc: fix a typo
"and this it may require you to unblock it temporarily" -> "and it may require you to unblock it temporarily"
2022-02-22 21:05:05 +00:00
Nick Craig-Wood
89f0e4df80 swift: fix about so it shows info about the current container only
Before this change `rclone about swift:container` would show aggregate
info about all the containers, not just the one in use.

This causes a problem if container listing is disabled (for example in
the Blomp service).

This fix makes `rclone about swift:container` show only the info about
the given `container`. If aggregate info about all the containers is
required then use `rclone about swift:`.

See: https://forum.rclone.org/t/rclone-mount-blomp-problem/29151/18
2022-02-22 12:55:57 +00:00
Nick Craig-Wood
399fb5b7fb Add Vincent Murphy to contributors 2022-02-22 12:55:57 +00:00
Vincent Murphy
19f1ed949c docs: Fix broken test_proxy.py link 2022-02-22 12:26:17 +00:00
Nick Craig-Wood
d3a1001094 drive: add --drive-skip-dangling-shortcuts flag - fixes #5949
This flag enables dangling shortcuts to be skipped without an error.
2022-02-22 12:22:21 +00:00
Nick Craig-Wood
dc7e3ea1e3 drive,gcs,googlephotos: disable OAuth OOB flow (copy a token) due to google deprecation
Before this change, rclone supported authorizing for remote systems by
going to a URL and cutting and pasting a token from Google. This is
known as the OAuth out-of-band (oob) flow.

This, while very convenient for users, has been shown to be insecure
and has been deprecated by Google.

https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html#disallowed-oob

> OAuth out-of-band (OOB) is a legacy flow developed to support native
> clients which do not have a redirect URI like web apps to accept the
> credentials after a user approves an OAuth consent request. The OOB
> flow poses a remote phishing risk and clients must migrate to an
> alternative method to protect against this vulnerability. New
> clients will be unable to use this flow starting on Feb 28, 2022.

This change disables that flow, and forces the user to use the
redirect URL flow. (This is the flow used already for local configs.)

In practice this will mean that instead of cutting and pasting a token
for remote config, it will be necessary to run "rclone authorize"
instead. This is how all the other OAuth backends work so it is a well
tested code path.

Fixes #6000
2022-02-18 12:46:30 +00:00
Nick Craig-Wood
f22b703a51 storj: rename tardigrade backend to storj backend #5616
This adds an alias for backwards compatibility and leaves a stub
documentation page to redirect people to the new documentation.
2022-02-11 11:04:15 +00:00
Nick Craig-Wood
c40129d610 fs: allow backends to have aliases #5616
This allows a backend to have multiple aliases. These aliases are
hidden from `rclone config` and the command line flags are hidden from
the user. However the flags, environment variables and config for the
alias will work just fine.
2022-02-11 11:04:15 +00:00
Nick Craig-Wood
8dc93f1792 Add Márton Elek to contributors 2022-02-11 11:04:03 +00:00
Nick Craig-Wood
f4c40bf79d mount: add --devname to set the device name sent to FUSE for mount display
Before this change, the device name was always the remote:path rclone
was configured with. However this can contain sensitive information
and it appears in the `mount` output, so `--devname` allows the user
to configure it.

See: https://forum.rclone.org/t/rclone-mount-blomp-problem/29151/11
2022-02-09 11:56:43 +00:00
Nick Craig-Wood
9cc50a614b s3: add note about Storj provider bug and workaround
See: https://github.com/storj/gateway-mt/issues/39
2022-02-08 11:40:29 +00:00
Elek, Márton
bcb07a67f6 tardigrade: update docs to explain differences between s3 and this backend
Co-authored-by: Caleb Case <calebcase@gmail.com>
2022-02-08 11:40:29 +00:00
Márton Elek
25ea04f1db s3: add specific provider for Storj Shared gateways
- unsupported features (Copy) are turned off for Storj
- enable urlEncodedListing for Storj provider
- set chunksize to 64Mb
2022-02-08 11:40:29 +00:00
Nick Craig-Wood
06ffd4882d onedrive: add --onedrive-root-folder-id flag #5948
This is to help navigate to difficult-to-find folders in onedrive.
2022-02-07 12:29:36 +00:00
Nick Craig-Wood
19a5e1d63b docs: document --disable-http2 #5253 2022-02-07 12:29:36 +00:00
Nick Craig-Wood
ec88b66dad Add Abhiraj to contributors 2022-02-07 12:29:36 +00:00
Abhiraj
aa2d7f00c2 drive: added --drive-copy-shortcut-content - fixes #4604 2022-02-04 11:37:58 +00:00
Nick Craig-Wood
3e125443aa build: fix ARM architecture version in .deb packages after nfpm change
Fixes #5973
2022-02-03 11:24:06 +00:00
Nick Craig-Wood
3c271b8b1e Add Eng Zer Jun to contributors 2022-02-03 11:24:06 +00:00
Nick Craig-Wood
6d92ba2c6c Add viveknathani to contributors 2022-02-03 11:24:06 +00:00
albertony
c26dc69e1b docs/jottacloud: add note that mime types are not available with --fast-list 2022-02-02 13:12:50 +01:00
albertony
b0de0b4609 docs: include all commands in online help top menu drop-down 2022-02-01 20:40:50 +01:00
albertony
f54641511a librclone: add support for mount commands
Fixes #5661
2022-02-01 19:29:36 +01:00
Eng Zer Jun
8cf76f5e11 test: use T.TempDir to create temporary test directory
The directory created by `T.TempDir` is automatically removed when the
test and all its subtests complete.

Reference: https://pkg.go.dev/testing#T.TempDir
Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2022-02-01 11:47:04 +00:00
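For illustration, a minimal Go test using T.TempDir in the way described above (the test and file names here are made up):

    package example

    import (
        "os"
        "path/filepath"
        "testing"
    )

    func TestWriteFile(t *testing.T) {
        // t.TempDir creates a fresh directory that is removed automatically
        // when the test and all its subtests complete, so no manual cleanup
        // or deferred os.RemoveAll is needed.
        dir := t.TempDir()

        path := filepath.Join(dir, "example.txt")
        if err := os.WriteFile(path, []byte("hello"), 0o600); err != nil {
            t.Fatal(err)
        }
    }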
viveknathani
18c24014da docs/content: describe mandatory fields for drive
Making a client-id for Google Drive requires you to add two more fields
besides the already documented "Application name" field. This commit
documents what should be written for those two fields.

Fixes #5967
2022-02-01 11:42:12 +00:00
Nick Craig-Wood
0ae39bda8d docs: fix and reword --update docs
After discussion on the forum with @bandwidth, this rewords the
--update docs to be correct and easier to understand.

See: https://forum.rclone.org/t/help-understanding-update/28937
2022-02-01 11:07:51 +00:00
Nick Craig-Wood
051685baa1 s3: fix multipart upload with --no-head flag - Fixes #5956
Before this change a multipart upload with the --no-head flag returned
the MD5SUM as a base64 string rather than a Hex string as the rest of
rclone was expecting.
2022-01-29 12:48:51 +00:00
albertony
07f53aebdc touch: fix issue where directory is created instead of file
Detected on ftp, sftp and Dropbox backends.

Fixes #5952
2022-01-28 20:29:12 +01:00
albertony
bd6d36b3f6 docs: improve standard list of properties for options 2022-01-28 19:43:51 +01:00
Nick Craig-Wood
b168479429 gcs: add missing regions - fixes #5955 2022-01-28 12:34:13 +00:00
Nick Craig-Wood
b447b0cd78 build: upgrade actions runner macos-11 to fix macOS build problems #5951 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
4bd2386632 build: don't specify macos SDK any more as default is good enough #5951
This fixes the build, in particular the error:

    Failed to run ["xcrun" "--sdk" "macosx11.1" "--show-sdk-path"]: exit status 1
2022-01-27 17:33:04 +00:00
Nick Craig-Wood
83b6b62c1b build: disable cmount tests under macOS and the CI since they are locking up
This fixes #5951 and allows the macOS builds to run again

See #5960 for more info.
2022-01-27 17:33:04 +00:00
Nick Craig-Wood
5826cc9d9e Add Paulo Martins to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
252432ae54 Add Gourav T to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
8821629333 Add Isaac Levy to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
a2092a8faf Add Vanessasaurus to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
2b6f4241b4 Add Alain Nussbaumer to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
e3dd16d490 Add Charlie Jiang to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
9e1fd923f6 Add Yunhai Luo to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
3684789858 Add Koopa to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
1ac1dd428a Add Niels van de Weem to contributors 2022-01-27 17:33:04 +00:00
Nick Craig-Wood
65dbd29c22 Add Kim to contributors 2022-01-27 17:33:04 +00:00
albertony
164774d7e1 Add Shmz Ozggrn to contributors 2022-01-27 09:43:42 +01:00
Shmz Ozggrn
507020f408 docs: Use Adaptive Logo in README 2022-01-27 09:35:36 +01:00
albertony
a667e03fc9 http: improved recognition of url pointing to a single file - fixes #5929 2022-01-26 11:41:01 +01:00
albertony
1045344943 http: status string already includes the status code 2022-01-26 11:41:01 +01:00
albertony
5e469db420 docs/http: fix list layout in --http-no-head help
Existing help text ended with a list, but then auto-generated list items
Config, Env Var, Type and Default would be included in the same list.
2022-01-26 11:41:01 +01:00
albertony
946e84d194 http: use string contains instead of index 2022-01-26 11:41:01 +01:00
albertony
162aba60eb http: error strings should not be capitalized 2022-01-26 11:41:01 +01:00
albertony
d8a874c32b Make http tests line ending agnostic 2022-01-26 11:41:01 +01:00
albertony
9c451d9ac6 Fix linting errors 2022-01-26 00:02:17 +01:00
albertony
8f3f24672c docs/serve: move help for template option into separate section 2022-01-25 18:19:21 +01:00
Paulo Martins
0eb7b716d9 s3: document Content-MD5 workaround for object-lock enabled buckets - Fixes #5765 2022-01-25 16:10:57 +00:00
Gourav T
ee9684e60f fichier: implemented about functionality 2022-01-25 15:53:58 +00:00
negative0
e0cbe413e1 rc: Allow user to disable authentication for web gui 2022-01-25 15:52:30 +00:00
albertony
2523dd6220 version: report correct friendly-name for windows 10/11 versions after 2004
Until Windows 10 version 2004 (May 2020) this can be found in the registry entry
ReleaseId; after that we must use the entry DisplayVersion (ReleaseId is stuck at 2009).
Source: https://ss64.com/nt/ver.html
2022-01-24 21:27:42 +01:00
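A rough Go sketch of that registry lookup, assuming golang.org/x/sys/windows/registry (Windows-only; the function name is made up):

    package example

    import "golang.org/x/sys/windows/registry"

    // friendlyVersion returns DisplayVersion when present (Windows 10 2004+),
    // otherwise falls back to ReleaseId, which is stuck at "2009" on newer builds.
    func friendlyVersion() (string, error) {
        k, err := registry.OpenKey(registry.LOCAL_MACHINE,
            `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
        if err != nil {
            return "", err
        }
        defer k.Close()
        if v, _, err := k.GetStringValue("DisplayVersion"); err == nil && v != "" {
            return v, nil
        }
        v, _, err := k.GetStringValue("ReleaseId")
        return v, err
    }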
albertony
c504d97017 config: fix display of config choices with empty help text 2022-01-18 20:17:57 +01:00
albertony
b783f09fc6 config: show default and example values in correct input syntax instead of quoted and escaped golang string syntax
See #5551
2022-01-16 14:57:38 +01:00
albertony
a301478a13 config: improved punctuation in initial config prompt 2022-01-16 14:57:38 +01:00
albertony
63b450a2a5 config: minor improvement of help text for encoding option
See #5551
2022-01-16 14:57:38 +01:00
albertony
843b77aaaa docs/ftp: improved default value description of port and username options
See #5551
2022-01-16 14:57:38 +01:00
albertony
3641727edb config: fix issue where required password options had to be re-entered when editing existing remote
See #5551
2022-01-16 14:57:38 +01:00
albertony
38e2f835ed config: fix handling of default, exclusive and required properties of multiple-choice options
Previously an empty input (just pressing enter) was only allowed for multiple-choice
options that did not have the Exclusive property set. With this change the existing
Required property is introduced into the multiple choice handling, so that one can have
Exclusive and Required options where only a value from the list is allowed, and one can
have Exclusive but not Required options where an empty value is accepted but any
non-empty value must still match an item from the list.

Fixes #5549

See #5551
2022-01-16 14:57:38 +01:00
albertony
bd4bbed592 config: remove explicit setting of required property to true for options with a default value
See #5551
2022-01-16 14:57:38 +01:00
albertony
994b501188 config: remove explicit setting of required property to its default value false
See #5551
2022-01-16 14:57:38 +01:00
albertony
dfa9381814 docs/jottacloud: correct reference to temp-dir 2022-01-16 14:34:15 +01:00
albertony
2a85feda4b docs/jottacloud: add note about upload only being supported on jotta device 2022-01-16 14:34:15 +01:00
albertony
ad46af9168 docs/librclone: note that adding -ldflags -s to the build command will reduce size of library file 2022-01-16 14:32:01 +01:00
albertony
2fed02211c docs/librclone: document use from C/C++ on Windows 2022-01-16 14:11:56 +01:00
albertony
237daa8aaf dedupe: add quit as a choice in interactive mode
Fixes #5881
2022-01-14 19:57:48 +01:00
albertony
8aeca6c033 docs: align menu items when icons have different sizes 2022-01-14 17:39:27 +00:00
albertony
fd82876086 librclone: allow empty string or null input instead of empty json object 2022-01-14 17:37:13 +00:00
Isaac Levy
be1a668e95 onedrive: minor optimization of quickxorhash
This patch avoids creating a new slice header in favour of a for loop.

This saves a few instructions!
2022-01-14 17:30:56 +00:00
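A generic illustration of that kind of micro-optimisation (not the actual quickxorhash code): indexing in a plain for loop instead of repeatedly re-slicing avoids building new slice headers in the hot path.

    package example

    // xorInto XORs src into dst with an index loop. An equivalent version that
    // re-slices dst and src on every iteration creates a new slice header each
    // time, which costs a few extra instructions in a tight loop.
    func xorInto(dst, src []byte) {
        n := len(src)
        if len(dst) < n {
            n = len(dst)
        }
        for i := 0; i < n; i++ {
            dst[i] ^= src[i]
        }
    }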
Vanessasaurus
9d4eab32d8 cmd: fix broken example link in help.go
This link appears to be broken, so here is another reference to (I think) the same file that provides a good example of cobra. We could also link to the current commit 8312004f41/cli/cobra.go, although it might be better to maintain an up-to-date example.
2022-01-13 16:26:19 +00:00
Alain Nussbaumer
b4ba7b69b8 dlna: change icons to the newest ones. 2022-01-13 16:23:24 +00:00
albertony
deef659aef Add Bumsu Hyeon to contributors 2022-01-13 13:25:20 +01:00
Bumsu Hyeon
4b99e84242 vfs/cache: fix handling of special characters in file names (#5875) 2022-01-13 13:23:25 +01:00
albertony
06bdf7c64c Add Lu Wang to contributors 2022-01-12 21:33:35 +01:00
Lu Wang
e1225b5729 docs/s3: fixed max-age example 2022-01-12 21:31:54 +01:00
albertony
871cc2f62d docs: fix links to rc sections 2022-01-12 19:51:26 +01:00
Charlie Jiang
bc23bf11db onedrive: add config option for oauth scope Sites.Read.All (#5883) 2022-01-10 21:28:19 +08:00
albertony
b55575e622 docs: fix typo 2022-01-03 18:46:40 +01:00
albertony
328f0e7135 docs: fix links to rc debug commands 2021-12-30 21:52:34 +01:00
albertony
a52814eed9 docs: fix links to rc data types section 2021-12-30 20:46:39 +01:00
albertony
071a9e882d docs: capitalization of flag usage strings 2021-12-30 14:07:24 +01:00
albertony
4e2ca3330c tree: remove obsolete --human replaced by global --human-readable - fixes #5868 2021-12-21 20:17:00 +01:00
Yunhai Luo
408d9f3e7a s3: Add GLACIER_IR storage class 2021-12-03 14:46:45 +00:00
Koopa
0681a5c86a lib/rest: process HTML entities within XML
MEGAcmd currently includes escaped HTML4 entities in its XML messages.
This behavior deviates from the XML standard, and it currently prevents
rclone from being able to use the remote.
2021-12-01 16:31:43 +00:00
Niels van de Weem
df09c3f555 pcloud: add support for recursive list 2021-12-01 15:58:44 +00:00
Kim
c41814fd2d backend:jottacloud change api used by ListR ( --fast-list ) 2021-12-01 14:21:37 +01:00
Nick Craig-Wood
c2557cc432 azureblob: fix crash with SAS URL and no container - fixes #5820
Before this change attempting NewObject on a SAS URL's root would
crash the Azure SDK.

This change detects that situation using the code from this previous fix

f7404f52e7 azureblob: fix crash when listing outside a SAS URL's root - fixes #4851

and returns "object not found" instead.

It also prevents things being uploaded to the root of the SAS URL
which also crashes the Azure SDK.
2021-11-27 16:18:18 +00:00
Nick Craig-Wood
3425726c50 oauthutil: fix crash when web browser requests /robots.txt - fixes #5836
Before this change the oauth webserver would crash if it received a
request to /robots.txt.

This patch makes it ignore (with 404 error) any paths it isn't
expecting.
2021-11-25 12:12:14 +00:00
Nick Craig-Wood
46175a22d8 Add Logeshwaran Murugesan to contributors 2021-11-25 12:11:47 +00:00
Logeshwaran Murugesan
bcf0e15ad7 Simplify content length processing in s3 with download url 2021-11-25 12:03:14 +00:00
Nick Craig-Wood
b91c349cd5 local: fix hash invalidation which caused errors with local crypt mount
Before this fix if a file was updated, but to the same length and
timestamp then the local backend would return the wrong (cached)
hashes for the object.

This happens regularly on a crypted local disk mount when the VFS
thinks files have been changed but their contents are actually
identical to what was written previously. This is because when files
are uploaded their nonce changes, so the contents of the file change,
but the timestamp and size remain the same because the file didn't
actually change.

This causes errors like this:

    ERROR: file: Failed to copy: corrupted on transfer: md5 crypted
    hash differ "X" vs "Y"

This turned out to be because the local backend wasn't clearing its
cache of hashes when the file was updated.

This fix clears the hash cache for Update and Remove.

It also puts a src and destination in the crypt message to make future
debugging easier.

Fixes #4031
2021-11-24 12:09:34 +00:00
Nick Craig-Wood
d252816706 vfs: add vfs/stats remote control to show statistics - fixes #5816 2021-11-23 18:00:21 +00:00
Nick Craig-Wood
729117af68 Add GGG KILLER to contributors 2021-11-23 18:00:21 +00:00
GGG KILLER
cd4d8d55ec docs: add a note about the B2 download_url format
Currently the B2 docs don't specify which format the download_url
setting should have, and if you input it wrong, there is nothing
in the verbose logs or anywhere else that can let you know that.
2021-11-23 17:57:34 +00:00
Nick Craig-Wood
f26abc89a6 union: fix treatment of remotes with // in
See: https://forum.rclone.org/t/connection-string-with-union-backend-and-a-lot-of-quotes/27577
2021-11-23 17:41:12 +00:00
lindwurm
b5abbe819f s3: Add Wasabi AP Northeast 2 endpoint info
* Wasabi has started providing an AP Northeast 2 (Osaka) endpoint, so add it to the list
* Rename ap-northeast-1 from "AP Northeast" to "AP Northeast 1 (Tokyo)"

Signed-off-by: lindwurm <lindwurm.q@gmail.com>
2021-11-22 18:02:57 +00:00
Nick Craig-Wood
a351484997 sftp: fix timeout on hashing large files by sending keepalives
Before this fix the SFTP sessions could time out when doing hashes if
they took longer than the --timeout parameter.

This patch sends keepalive packets every minute while a shell command
is running to keep the connection open.

See: https://forum.rclone.org/t/rclone-check-over-sftp-failure-to-calculate-md5-hash-for-large-files/27487
2021-11-22 15:26:29 +00:00
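A rough sketch of the keepalive idea with golang.org/x/crypto/ssh; the one-minute interval matches the description above, while the function name and wiring are illustrative:

    package example

    import (
        "time"

        "golang.org/x/crypto/ssh"
    )

    // sendKeepalives pings the server once a minute until stop is closed, so a
    // long-running remote hash command does not trip the idle timeout.
    func sendKeepalives(conn *ssh.Client, stop <-chan struct{}) {
        ticker := time.NewTicker(time.Minute)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                // The reply is ignored; the request only keeps traffic flowing.
                _, _, _ = conn.SendRequest("keepalive@openssh.com", true, nil)
            case <-stop:
                return
            }
        }
    }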
Nick Craig-Wood
099eff8891 sftp: refactor so we only have one way of running remote commands
This also returns errors from running ssh Hash commands which we
didn't do before.
2021-11-22 15:26:29 +00:00
albertony
c4cb167d4a Add rsapkf and Will Holtz to contributors 2021-11-21 19:26:05 +01:00
Will Holtz
38e100ab19 docs/config: more explicit doc for config create --all with params 2021-11-21 19:22:19 +01:00
rsapkf
db95a0d6c3 docs/pcloud: fix typo 2021-11-21 19:16:19 +01:00
Nick Craig-Wood
df07964db3 azureblob: raise --azureblob-upload-concurrency to 16 by default
After speed testing it was discovered that upload speed goes up pretty
much linearly with upload concurrency. This patch changes the default
from 4 to 16 which means that rclone will use 16 * 4M = 64M per
transfer which is OK even for low memory devices.

This adds a note that performance may be increased by increasing
upload concurrency.

See: https://forum.rclone.org/t/performance-of-rclone-vs-azcopy/27437/9
2021-11-18 16:09:02 +00:00
Nick Craig-Wood
fbc4c4ad9a azureblob: remove 100MB upper limit on chunk_size as it is no longer needed 2021-11-18 16:09:02 +00:00
Nick Craig-Wood
4454b3e1ae azureblob: implement --azureblob-upload-concurrency parameter to speed uploads
See: https://forum.rclone.org/t/performance-of-rclone-vs-azcopy/27437
2021-11-18 16:08:57 +00:00
Nick Craig-Wood
f9321fccbb Add deinferno to contributors 2021-11-18 15:51:45 +00:00
Ole Frost
3c2252b7c0 fs/operations: add server-side moves to stats
Fixes #5430
2021-11-18 12:20:56 +00:00
Cnly
51c952654c fstests: treat accountUpgradeRequired as success for OneDrive PublicLink 2021-11-17 17:35:17 +00:00
deinferno
80e47be65f yandex: add permanent deletion support 2021-11-17 16:57:41 +00:00
Michał Matczuk
38dc3e93ee fshttp: add prometheus metrics for HTTP status code
This patch adds rclone_http_status_code counter vector labeled by

* host,
* method,
* code.

It makes it possible to see HTTP errors, backoffs, etc.

The Metrics struct is designed for extensibility.
Adding new metrics is a matter of adding them to the Metrics struct and including them in the response handling.

This feature has been discussed in the forum [1].

[1] https://forum.rclone.org/t/prometheus-metrics/14484
2021-11-17 18:38:12 +03:00
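A minimal sketch of such a counter vector using the Prometheus Go client; the wrapping transport here is an assumption for illustration, not rclone's actual fshttp code:

    package example

    import (
        "net/http"
        "strconv"

        "github.com/prometheus/client_golang/prometheus"
    )

    var httpStatusCodes = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "rclone_http_status_code",
            Help: "HTTP responses seen, labelled by host, method and status code.",
        },
        []string{"host", "method", "code"},
    )

    func init() { prometheus.MustRegister(httpStatusCodes) }

    // countingTransport wraps a RoundTripper and bumps the counter per response.
    type countingTransport struct{ next http.RoundTripper }

    func (t countingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
        resp, err := t.next.RoundTrip(req)
        if err == nil {
            httpStatusCodes.WithLabelValues(req.URL.Host, req.Method,
                strconv.Itoa(resp.StatusCode)).Inc()
        }
        return resp, err
    }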
Nick Craig-Wood
ba6730720d Fix repeated error messages after pkg/errors removal 2021-11-15 17:58:40 +00:00
Nick Craig-Wood
7735b5c694 Add Sinan Tan to contributors 2021-11-15 17:58:40 +00:00
Nick Craig-Wood
d45b3479ee Add Andy Jackson to contributors 2021-11-15 17:58:40 +00:00
Nick Craig-Wood
4c5df0a765 Add Fredric Arklid to contributors 2021-11-15 17:58:40 +00:00
Sinan Tan
8c61a09be2 crypt: add test cases and documentation for base64 and base32768 filename encoding #5801 2021-11-15 17:57:02 +00:00
Max Sum
c217145cae crypt: add base64 and base32768 filename encoding options #5801 2021-11-15 17:57:02 +00:00
thomae
4c93378f0e serve sftp: update docs on --stdio 2021-11-12 10:49:35 +00:00
thomae
f9e54f96c3 docs/sftp: fix typo 2021-11-11 19:20:15 +01:00
Andy Jackson
af0fcd03cb hdfs: add file and directory move/rename support 2021-11-11 16:41:43 +00:00
albertony
00aafc957e sftp: add rclone to list of supported md5sum/sha1sum commands to look for
See #5781
2021-11-11 15:16:45 +01:00
albertony
29abbd2032 hashsum: support creating hash from data received on stdin
See #5781
2021-11-11 15:16:45 +01:00
Fredric Arklid
663b2d9c46 jottacloud: Add support for Tele2 Cloud 2021-11-11 12:32:23 +00:00
Nick Craig-Wood
f36d6d01b5 rc: fix operations/publiclink default for expires parameter
Before this change the expires parameter was defaulting to 0 if not
provided.

This change makes it default to fs.DurationOff which is the same as
the `rclone link` command.

See: https://forum.rclone.org/t/operations-publiclink-from-dropbox-error-not-autorized/27374
2021-11-11 11:14:22 +00:00
Nick Craig-Wood
0c03aa3a8b dropbox: speed up directory listings by specifying 1000 items in a chunk 2021-11-11 11:14:22 +00:00
Nick Craig-Wood
caa2b8bf40 dropbox: save an API request when at the root
Before this change, rclone always emitted an API request to discover
what type of thing the root is.

This is unnecessary as it is always a directory.
2021-11-11 11:14:22 +00:00
Nick Craig-Wood
421e840e37 Add Borna Butkovic to contributors 2021-11-11 11:14:22 +00:00
Nick Craig-Wood
9b57d27be4 Add David to contributors 2021-11-11 11:14:22 +00:00
Borna Butkovic
627ac1b2d9 ftp: add --ftp-ask-password to prompt for password when needed 2021-11-10 17:34:14 +00:00
David
ae395d8cf0 rc: error on web GUI update won't be fatal - fixes #5385 2021-11-10 17:05:13 +00:00
Ankur Gupta
f04520a6e3 operations: fix goroutine leak in case of copy retry
Whenever transfer.Account() is called, a new goroutine acc.averageLoop()
is started. This goroutine exits only when the channel acc.exit is closed.
acc.exit is closed when acc.Done() is called, which happens during tr.Done().

However, if tr.Reset is called during a copy low-level retry, it replaces
tr.acc without calling acc.Done(), which results in the goroutine
mentioned above never exiting.

This commit calls acc.Done() during tr.Reset().
2021-11-10 16:44:29 +00:00
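A generic sketch of the leak pattern being fixed, with made-up names: a loop goroutine tied to an exit channel must have that channel closed whenever its owner is replaced, not only on the normal Done path.

    package example

    import "time"

    type account struct {
        exit chan struct{}
    }

    func newAccount() *account {
        a := &account{exit: make(chan struct{})}
        go a.averageLoop() // exits only when a.exit is closed
        return a
    }

    func (a *account) averageLoop() {
        ticker := time.NewTicker(time.Second)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                // ... update a moving average ...
            case <-a.exit:
                return
            }
        }
    }

    func (a *account) Done() { close(a.exit) }

    // reset replaces an account; calling Done on the old one is what stops the
    // old averageLoop goroutine from leaking.
    func reset(old *account) *account {
        old.Done()
        return newAccount()
    }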
Nick Craig-Wood
c968c3e41c build: raise minimum go version to go1.15
This was necessary because go1.14 seems to have a modules-related bug
which means it tries to build modules even though all uses of them are
disabled with build constraints. This seems to be fixed in go1.15.
2021-11-10 16:11:12 +00:00
Nick Craig-Wood
3661791e82 serve restic: disable for go1.16 and earlier after update 2021-11-10 15:42:50 +00:00
Nick Craig-Wood
4198763c35 build: update all dependencies 2021-11-10 10:34:56 +00:00
Nick Craig-Wood
3de47b8ed4 build: upgrade go.mod file to go1.17 2021-11-10 10:34:56 +00:00
Nick Craig-Wood
71b8e1e80b build: more docs on upgrading dependencies 2021-11-10 10:34:56 +00:00
Nick Craig-Wood
7366e97dfc mega: fix error handling broken by removal of github.com/pkg/errors
There were instances of errors.Wrap being called with a nil error
which the conversion didn't deal with correctly.
2021-11-09 13:43:45 +00:00
Nick Craig-Wood
21ba4d9a18 onedrive: fix error handling broken by removal of github.com/pkg/errors
There were instances of errors.Wrap being called with a nil error
which the conversion didn't deal with correctly.
2021-11-09 13:43:45 +00:00
Nick Craig-Wood
96e099d8e7 union: fix error handling broken by removal of github.com/pkg/errors
There were instances of errors.Wrap being called with a nil error
which the conversion didn't deal with correctly.
2021-11-09 13:43:45 +00:00
Nick Craig-Wood
2a31b5bdd6 Add bbabich to contributors 2021-11-09 13:43:45 +00:00
Nick Craig-Wood
9bdfe4c36f Add Vitor Arruda to contributors 2021-11-09 13:43:45 +00:00
Nick Craig-Wood
e3a2f539fe Add Chris Lu to contributors 2021-11-09 13:43:45 +00:00
Nick Craig-Wood
ffa943e31f Add Carlo Mion to contributors 2021-11-09 13:43:45 +00:00
bbabich
b16f603c51 s3: Add RackCorp object storage to providers 2021-11-09 11:46:58 +00:00
database64128
a7a8372976 🧪 fstest: fix time tests on Windows and add convenience methods to check local and remote fs with precision
Previously only the fs being checked on gets passed to
GetModifyWindow(). However, in most tests, the test files are
generated in the local fs and transferred to the remote fs. So the
local fs time precision has to be taken into account.

This meant that on Windows the time tests failed because the
local fs has a time precision of 100ns. Checking remote items uploaded
from local fs on Windows also requires a modify window of 100ns.
2021-11-09 11:43:36 +00:00
Vitor Arruda
9beb0677e4 backend: Fix union eplus policy returning nil 2021-11-08 11:55:27 +00:00
Nick Craig-Wood
e43b5ce5e5 Remove github.com/pkg/errors and replace with std library version
This is possible now that we no longer support go1.12 and brings
rclone into line with standard practices in the Go world.

This also removes errors.New and errors.Errorf from lib/errors and
prefers the stdlib errors package over lib/errors.
2021-11-07 11:53:30 +00:00
Chris Lu
97328e5755 Improve description for SeaweedFS 2021-11-06 21:01:50 +03:00
Carlo Mion
7b7d780fff stats: fix missing StatsInfo fields in the computation of the group sum 2021-11-05 15:33:00 +00:00
Carlo Mion
c2600f9e4d stats: fix missing computation of transferQueueSize when summing up statistics group - fixes #5749 2021-11-05 15:33:00 +00:00
Ivan Andreev
7bd853ce35 Add Roberto Ricci to contributors 2021-11-05 18:29:47 +03:00
Roberto Ricci
05150cfb1d backend/ftp: increase testUploadTimeout.maxTime to 10 seconds
On slow machines (e.g. Github CI), especially if GOARCH=386,
the test for cmd/serve/ftp could fail if this value is too small.

Fixes #5783
2021-11-05 18:27:44 +03:00
albertony
25366268fe Add Atílio Antônio to contributors 2021-11-04 12:55:49 +01:00
Atílio Antônio
c08d48a50d docs: improve grammar and fix typos (#5361)
This alters some comments in source files, but is mainly concerned with documentation files and help messages.
2021-11-04 12:50:43 +01:00
Nick Craig-Wood
454574e2cc s3: collect the provider quirks into a single function and update
This removes the checks against the provider throughout the code and
puts them into a single setQuirks function for easy maintenance when
adding a new provider.

It also updates the quirks with the results of testing against
backends we have access to.

This also adds a list_url_encode parameter so that quirk can be
manually set.
2021-11-03 21:44:09 +00:00
Nick Craig-Wood
9218a3eb00 fs: add a tristate true/false/unset configuration value 2021-11-03 21:44:09 +00:00
Nick Craig-Wood
1e4ef4b4d5 Add Felix Bünemann to contributors 2021-11-03 21:44:09 +00:00
Nick Craig-Wood
8d92f7d697 s3: fallback to ListObject v1 on unsupported providers
This implements a quirks system for providers and notes which
providers we have tested to support ListObjectsV2.

For those providers which don't support ListObjectsV2 we use the
original ListObjects call.
2021-11-03 19:13:50 +00:00
Felix Bünemann
fd56abc5f2 s3: Use ListObjectsV2 for faster listings
Using ListObjectsV2 with a continuation token is about 5-6x faster than
ListObjectsV2 with a marker.
2021-11-03 19:13:50 +00:00
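A small sketch of paging with ListObjectsV2 and a continuation token using aws-sdk-go; the bucket name is a placeholder:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))
        input := &s3.ListObjectsV2Input{Bucket: aws.String("example-bucket")}
        for {
            page, err := svc.ListObjectsV2(input)
            if err != nil {
                panic(err)
            }
            for _, obj := range page.Contents {
                fmt.Println(aws.StringValue(obj.Key))
            }
            if !aws.BoolValue(page.IsTruncated) {
                break
            }
            // The token resumes the listing exactly where the last page ended.
            input.ContinuationToken = page.NextContinuationToken
        }
    }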
Ivan Andreev
b323bf34e2 sync/test: skip test ConcurrentTruncate on uptobox (take 2)
The test is not applicable to uptobox which can't upload empty files.
The test was not skipped as intended because the direct error was compared.
This fix will compare error Cause because Sync wraps the error.
2021-11-02 19:24:23 +03:00
Ivan Andreev
e78e73eae7 lib/encoder: fix benchmarks
At some point in the past the Slash encode option was added to the Onedrive
encoder, so it began to encode slashes in file names rather than treating
them as path separators.
This patch adapts benchmark test cases accordingly.

Fixes #5659
2021-11-02 19:23:16 +03:00
Nick Craig-Wood
f51a5eca2e fstests: add encoding test for URL encoded path name #5768
Add an encoding test to make sure backends can deal with a URL encoded
path name. This is a fairly common failing in backends and has been an
intermittent problem with onedrive itself.
2021-11-02 15:59:36 +00:00
albertony
39e2af7974 config: allow dot in remote names (#5606) 2021-11-01 20:50:06 +01:00
Ivan Andreev
b3217adf08 Add Chris Nelson to contributors 2021-11-01 21:24:06 +03:00
Ivan Andreev
074234119a bisync: documentation #5164
Co-authored-by: Chris Nelson <stuff@cjnaz.com>
2021-11-01 21:00:27 +03:00
Ivan Andreev
6210e22ab5 bisync: implementation #5164
Fixes #118

Co-authored-by: Chris Nelson <stuff@cjnaz.com>
2021-11-01 21:00:27 +03:00
Ivan Andreev
940e99a929 bisync: test scenarios #5164
Co-authored-by: Chris Nelson <stuff@cjnaz.com>
2021-11-01 21:00:27 +03:00
Ivan Andreev
79b6866b57 rc: export NewErrParamInvalid #5164 2021-11-01 21:00:27 +03:00
Ivan Andreev
c142e3edcc filter: export GlobToRegexp #5164 2021-11-01 21:00:27 +03:00
Nick Craig-Wood
5c646dff9a Start v1.58.0-DEV development 2021-11-01 16:54:17 +00:00
Nick Craig-Wood
19dfaf7440 docs: fix shortcode rendering on download page 2021-11-01 16:50:52 +00:00
Nick Craig-Wood
169990e270 Version v1.57.0 2021-11-01 15:45:40 +00:00
Nick Craig-Wood
e781bcbba1 Add David Liu to contributors 2021-11-01 15:45:40 +00:00
Nick Craig-Wood
409916b0c5 Add trevyn to contributors 2021-11-01 15:45:40 +00:00
albertony
d9c5be32e7 Add thomae to contributors 2021-11-01 15:10:14 +01:00
thomae
57074be9b3 serve sftp: fix typo 2021-11-01 15:07:13 +01:00
Joda Stößer
bae5c7c81b docs: remove application type "other" from drive.md
The application type "other" is not an option anymore.
2021-11-01 13:15:31 +00:00
albertony
05321f4aef docs/sftp: more detailed explanation of pubkey file and certificate 2021-11-01 13:14:17 +00:00
albertony
c9d7248d85 docs/jottacloud: update description of whitelabel services 2021-11-01 12:57:51 +00:00
albertony
da8f9be84b docs: describe the --human-readable option in more detail 2021-11-01 12:55:52 +00:00
David Liu
b806166147 docs: swift: Update OCI url
Oracle cloud storage now rebranded also as OCI bucket with new entry point
2021-11-01 12:54:23 +00:00
acsfer
20f936c9d4 Add note about S3 compatible services 2021-11-01 12:47:18 +00:00
albertony
91cdaffcc1 docs: add faq section explaining why rclone changes fullwidth characters in file names 2021-11-01 12:46:23 +00:00
trevyn
33bf9b4923 Add mention of Rust bindings for librclone 2021-11-01 12:43:31 +00:00
albertony
b4944f4520 docs/librclone: document that strings are utf8 encoded 2021-11-01 12:39:00 +00:00
albertony
286b152e7b librclone: free strings in python example 2021-11-01 12:36:52 +00:00
Nick Craig-Wood
f7764a0c9d premiumizeme: fix server side directory move after API changes
Apparently moving a directory using the id "0" as the root no longer
works, so this reads the real root ID when it is listed and uses that.

This fixes the DirMove problem.

See: https://forum.rclone.org/t/premiumize-cant-move-files/27169
See: #5734
2021-10-31 19:18:55 +00:00
Nick Craig-Wood
07fcba888c dircache: add SetRootIDAlias to update RootID from FindLeaf 2021-10-31 19:18:55 +00:00
Nick Craig-Wood
af705c754c premiumizeme: fix server side move after API change
See: https://forum.rclone.org/t/premiumize-cant-move-files/27169
See: #5734
2021-10-31 19:18:55 +00:00
Nick Craig-Wood
f85e3209b3 premiumizeme: fix directory listing after API changes
The API doesn't seem to accept a value of "0" any more for the root
directory ID, giving the error "Could not decode folder id".

However omitting it seems to work fine.
2021-10-31 19:18:55 +00:00
Nick Craig-Wood
e77dcb7f52 test_all: remove stray debug 2021-10-31 19:18:55 +00:00
Nick Craig-Wood
4ab842198a Revert "premiumizeme: attempt to fix integration tests"
This reverts commit 1eff0eef7a.

Now that the test account is premium again it is very fast and this is
no longer needed.
2021-10-31 19:18:54 +00:00
albertony
a8059b8a90 docs/mount: add note that to execute programs one must set custom filesystem permissions (#5771) 2021-10-31 00:48:56 +02:00
Nick Craig-Wood
cf2c2792e6 s3: fix corrupted on transfer: sizes differ 0 vs xxxx with Ceph
In this commit, released in 1.56.0 we started reading the size of the
object from the Content-Length header as returned by the GET request
to read the object.

4401d180aa s3: add --s3-no-head-object

However some object storage systems, notably Ceph, don't return a
Content-Length header.

The new code correctly calls the setMetaData function with a nil
pointer to the ContentLength.

However due to this commit from 2014, released in v1.18, the
setMetaData function was not ignoring the size as it should have done.

0da6f24221  s3: use official github.com/aws/aws-sdk-go including multipart upload #101

This commit correctly ignores the content length if not set.

Fixes #5732
2021-10-30 12:01:09 +01:00
Nick Craig-Wood
e6e1c49b58 s3: fix shared_credentials_file auth after reverting incorrect fix #5762
Before this change the `shared_credentials_file` config option was
being ignored.

The correct value is passed into the SDK but it only sets the
credentials in the default provider. Unfortunately we wipe the default
provider in order to install our own chain if env_auth is true.

This patch restores the shared credentials file in the session
options, exactly the same as how we restore the profile.

Original fix:

1605f9e14d s3: Fix shared_credentials_file auth
2021-10-30 11:54:17 +01:00
Nick Craig-Wood
712f9c9760 s3: fix IAM Role for Service Account not working and other auth problems
This patch reverts this commit

1605f9e14d s3: Fix shared_credentials_file auth

It unfortunately had the side effect of making the s3 SDK ignore the
config in our custom chain and use the default provider. This means
that advanced auth was being ignored such as --s3-profile with
role_arn.

Fixes #5468
Fixes #5762
2021-10-30 11:54:17 +01:00
albertony
a238877ad8 docs: note that destination is always a directory (#5766) 2021-10-30 00:30:00 +02:00
Ivan Andreev
70297c3aed sync/test: TestConcurrentTruncate needs empty files - skip on uptobox 2021-10-28 17:04:56 +03:00
Nolan Woods
a074a2b983 lib/http: Fix handling of ssl credentials
Adds a test that makes an actual http and https request against the server
2021-10-27 14:46:10 +03:00
Nick Craig-Wood
00ceeef21c hdfs: wait longer for the server to start up in the integration tests #5734
This needs fixing properly so rclone knows when the server has started
properly.
2021-10-23 22:53:17 +01:00
Nick Craig-Wood
2e81b78486 Add Dmitry Bogatov to contributors 2021-10-23 22:53:17 +01:00
Dmitry Bogatov
bb11803f1f Create direct share link for "koofr" backend
Instead of creating a link to the web interface, create a direct link usable by
curl(1) or wget(1).
2021-10-23 15:00:33 +03:00
Nick Craig-Wood
a542ddf60a hdfs: attempt to make integration tests more reliable #5734
This makes sure the namenode is accepting TCP connections before
starting the integration tests in an attempt to make them more
reliable.
2021-10-22 13:07:48 +01:00
Nick Craig-Wood
257f5d279a filefabric: fix directory move after API change #5734
The JSON response of the directory move call has changed from
returning the TaskID as a string to returning it as an integer. In other
places it is still returned as a string though.

This patch allows the TaskID to be an integer or a string in the JSON
response and keeps it internally as a string like before.
2021-10-22 12:58:00 +01:00
albertony
4f05ece39e test: fix touchdir test on backends without modtime support 2021-10-22 13:37:34 +02:00
albertony
9c8c0a58b5 touch: fix recursive touch due to recently introduced error ErrorIsDir 2021-10-22 13:37:34 +02:00
albertony
a70c20fe6b touch: improve error message from recursive touch 2021-10-22 13:37:34 +02:00
Ivan Andreev
59e77f794e serve/docker: skip race test until we find a solution for deadlock
Related to #5738
2021-10-22 14:00:48 +03:00
Ivan Andreev
1a66736ef0 Add Thomas Stachl to contributors 2021-10-21 15:23:48 +03:00
Ivan Andreev
844025d053 ftp: add support for precise time #5655 2021-10-21 14:50:53 +03:00
albertony
3a03f2778c test: ignore integration test TestCopyFileMaxTransfer on Google Drive
The test fails because it expects that a copy with MaxTransfer and CutoffModeHard
should return a fatal error, since this is thrown from accounting (ErrorMaxTransferLimitReachedFatal),
but in the case of Google Drive the external Google API library catches it and replaces it with a
non-fatal error:

pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))

(7290f25351/internal/gensupport/media.go (L140))
2021-10-21 12:42:25 +01:00
Ivan Andreev
29c6c86c00 ftp: fix timeout after long uploads #5596 2021-10-21 14:18:23 +03:00
Ivan Andreev
a32fde09ca fs/http: declutter code #5596 2021-10-21 14:18:23 +03:00
Ivan Andreev
1d50336615 ftp: replace jlaffaye/ftp by rclone/ftp in the build #5596 2021-10-21 14:18:23 +03:00
Thomas Stachl
015b250905 serve/docker: build docker plugin for multiple platforms #5668
Fixes #5462

Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
2021-10-21 13:01:23 +03:00
Nick Craig-Wood
4b1ada2d51 filefabric: allow integration tests double time as they keep timing out #5734 2021-10-21 09:54:29 +01:00
albertony
f589dbc077 docs: don't show fictional example values for options as multiple choice items
See #5538
2021-10-20 22:56:19 +02:00
albertony
8efead1ee6 docs: update guide for contributing documentation
See #5538
2021-10-20 22:56:19 +02:00
albertony
9a17b32b5d docs: automatically remove ending punctuation from first line of backend option help string when used for flag usage
See #5538
2021-10-20 22:56:19 +02:00
albertony
8b65c55711 cmd/config: improve option prompt
See #5538
2021-10-20 22:56:19 +02:00
albertony
e2f47ecdeb docs: punctuation cleanup
See #5538
2021-10-20 22:56:19 +02:00
albertony
b868561951 jottacloud: return direct download link from rclone link command
If the shared path is a directory, then the download will be a zip archive.

Fixes #5391

See #5399
2021-10-20 19:54:29 +02:00
albertony
78db3dba0e jottacloud: add support for UserInfo feature
See #5399
2021-10-20 19:54:29 +02:00
albertony
41876dd669 touch: add support for touching files in directory, with options for recursive, filtering and dry-run/interactive
Fixes #5301
2021-10-20 19:24:57 +02:00
Nick Craig-Wood
2e72ec96c1 qingstor: ignore harmless failing integration test #5734
The test TestIntegration/FsMkdir/FsPutFiles/FromRoot/ListR fails in
the integration test because there is a broken bucket in the test
account which support haven't been able to remove.
2021-10-20 17:51:06 +01:00
Nick Craig-Wood
9742648fce fichier: allow more list retries for the integration tests #5734 2021-10-20 17:45:54 +01:00
Nick Craig-Wood
d73264572b putio: allow integration tests double time as they keep timing out #5734 2021-10-20 17:38:30 +01:00
Nick Craig-Wood
ff801e8e17 test_all: allow configuring a multiplier for the timeout #5734 2021-10-20 17:38:30 +01:00
Nick Craig-Wood
72c013c6f4 vfs: increase time to wait for writers in tests to 30s
In some backends (eg putio) this deadline was consistently missed at
10s so this patch increases it to 30s.

See: #5734
2021-10-20 17:38:30 +01:00
Nick Craig-Wood
1eff0eef7a premiumizeme: attempt to fix integration tests
This tries to fix the integration tests by only allowing one
premiumizeme test to run at once, in the hope it will stop rclone
hitting the rate limits and breaking the tests.

See: #5734
2021-10-20 17:38:30 +01:00
Nick Craig-Wood
5a5318720a onedrive: stop public link test complaining on non-business account
The TestIntegration/FsMkdir/FsPutFiles/PublicLink test doesn't work on
a standard onedrive account, it returns

    accessDenied: accountUpgradeRequired: Account Upgrade is required for this operation.

See: #5734
2021-10-20 17:38:30 +01:00
Nick Craig-Wood
e253b44882 dropbox: stop public link test complaining on non-enterprise account
The TestIntegration/FsMkdir/FsPutFiles/PublicLink test doesn't work on
a standard dropbox account, only on an enterprise account because it
sets expiry dates.

See: #5734
2021-10-20 17:38:30 +01:00
Ivan Andreev
0d7426a2dd hasher: backend documentation #5587 2021-10-20 19:11:54 +03:00
Ivan Andreev
f102ef2161 hasher: add hasher backend #5587 2021-10-20 19:11:54 +03:00
Ivan Andreev
57c7fde864 lib/kv: add unit tests, fix races #5587
After testing concurrent calling of `kv.Start` and `db.Stop` I had to protect
more parts of these with a mutex to make the results deterministic without Sleeps
in the test body. It's safer but has the potential to lock Start for up to
2 seconds due to `db.open`.
2021-10-20 19:11:54 +03:00
Ivan Andreev
50df8cec9c lib/kv: add key-value database api #5587
Add bolt-based key-value database support.

Quick API description:
https://github.com/rclone/rclone/pull/5587#issuecomment-942174768
2021-10-20 19:11:54 +03:00
Nick Craig-Wood
8cd3251b57 fstests: Relax MimeType support checking #5587
Before this change we checked that features.ReadMimeTime was set if
and only if the Object.MimeType method was implemented.

However this test is overly general - we don't care if Objects
advertise MimeType when features.ReadMimeTime is set provided that
they always return an empty string (which is what a wrapping backend
might do).

This patch implements that logic.
2021-10-20 19:11:54 +03:00
Nick Craig-Wood
cc2f6f722c filefabric: make backoff exponential for error_background to fix errors
Before this change the backoff for the error_background error was 6
seconds. This means that if it wasn't resolved in 60 seconds (with the
default 10 low level retries) then an error was reported.

This error was being reported frequently in the integration tests, so
is likely affecting real users too.

This patch changes the backoff into an exponential backoff
1,2,4,8...1024 seconds to make sure we wait long enough for the
background operation to complete.

See #5734
2021-10-20 15:41:09 +01:00
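The schedule described above (1, 2, 4, ... 1024 seconds) is just a doubling delay with a cap, roughly:

    package example

    import "time"

    // backoff returns the wait before retry number attempt (0-based):
    // 1s, 2s, 4s, ... capped at 1024s.
    func backoff(attempt int) time.Duration {
        d := time.Second << uint(attempt)
        if limit := 1024 * time.Second; d <= 0 || d > limit {
            d = limit // also guards against shift overflow for large attempts
        }
        return d
    }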
Ivan Andreev
6cda4c2c3c Add Ian Levesque to contributors 2021-10-19 23:02:51 +03:00
Ivan Andreev
023b666863 Add Filip Rysavy to contributors 2021-10-19 23:01:47 +03:00
Ivan Andreev
2a4c6ad0e7 Add Matthew Sevey to contributors 2021-10-19 23:00:16 +03:00
Ivan Andreev
6d02530f9d sia: finish documentation #4514
Also rename stuttering `--sia-sia-user-agent` to `--sia-user-agent`
2021-10-19 22:55:27 +03:00
Ivan Andreev
c5bc857f9b sia: fix and enable integration tests #4514
- setup correct path encoding (fixes backend test FsEncoding)
- ignore range option if file is empty (fixes VFS test TestFileReadAtZeroLength)
- cleanup stray files left after failed upload (fixes test FsPutError)
- rebase code on master, adapt backend for rclone context passing
- translate Siad errors to rclone native FS errors in sia errorHandler
- TestSia: return proper backend options from the script
- TestSia: use an up-to-date AntFarm image, as nebulouslabs/siaantfarm is stale
2021-10-19 22:55:27 +03:00
Matthew Sevey
0d1e017e09 sia: setup docker with sia-antfarm for test #4514
Always pull the latest Sia Antfarm docker image
Add wait for Sia renter to become upload ready

Co-authored-by: Filip Rysavy <fil@siasky.net>
2021-10-19 22:55:27 +03:00
Ian Levesque
3351b1e6ae sia: add backend for sia decentralized cloud #4514 2021-10-19 22:55:27 +03:00
Fred
b085aa1a3f seafile: fix error when not configured for 2fa (#5665) 2021-10-19 20:53:35 +01:00
Nick Craig-Wood
eb0c8284f1 azureblob: fix incorrect size after --azureblob-no-head-object patch
In

05f128868f azureblob: add --azureblob-no-head-object

we incorrectly parsed the size of the object from the Content-Length
header of the response. This is incorrect in the presence of Range
requests.

This fixes the problem by parsing the Content-Range header, if
available, to read the correct length when a Range request was
issued.

See: #5734
2021-10-19 20:12:17 +01:00
Nick Craig-Wood
f5c7c597ba s3: Use a combination of SDK retries and rclone retries - fixes #5509
This reverts commit

dc06973796 Revert "s3: use rclone's low level retries instead of AWS SDK to fix listing retries"

Which in turn reverted

5470d34740 "backend/s3: use low-level-retries as the number of SDK retries"

So we are back where we started.

It then modifies it to set the AWS SDK to `--low-level-retries`
retries, but set the rclone retries to 2 so that directory listings
can be retried.
2021-10-19 20:12:17 +01:00
Nick Craig-Wood
3cef84aabe Add r0kk3rz to contributors 2021-10-19 20:12:17 +01:00
Nick Craig-Wood
93afd5c346 Add Rajat Goel to contributors 2021-10-19 20:12:17 +01:00
Alex Chen
1c3c8babd3 docs: mention make for building and cmount tag for macos (#5487) 2021-10-19 12:18:06 +08:00
Ivan Andreev
690a7ac783 chunker: fix md5all test for no-meta test remotes 2021-10-18 18:04:07 +03:00
Ivan Andreev
bbcc9a45fe serve/docker: allow to customize proxy settings of docker plugin 2021-10-18 18:03:06 +03:00
albertony
16949fde09 Do not override mime types from os defaults
https://forum.rclone.org/t/rclone-serve-http-save-as/26672
2021-10-18 13:28:22 +01:00
r0kk3rz
8e4b87ae03 s3: Add AWS Snowball Edge to providers examples - fixes #5720 2021-10-18 12:52:59 +01:00
Rajat Goel
db6002952e dropbox: upgrade sdk version 2021-10-16 10:55:02 +01:00
Nick Craig-Wood
96e14bf456 sftp: fix initialization bug introduced by fs.ErrorIsDir return
3fbaa4c0b0 backends: make NewObject return fs.ErrorIsDir if possible
2021-10-16 10:38:24 +01:00
Nick Craig-Wood
54da6154c4 operations: fix lsjson --stat on root directories of bucket based backends 2021-10-16 10:38:24 +01:00
Ivan Andreev
f50537b64b ftp: add option to disable tls13 2021-10-15 20:24:37 +03:00
Ivan Andreev
f37b25a2df ftp: enable tls session cache by default 2021-10-15 19:54:25 +03:00
albertony
29b8c71522 build: force utf8 when updating backend docs from python script (#5721) 2021-10-15 18:51:57 +02:00
Nick Craig-Wood
7b66ca132d build: increase timeout for golangci-lint to 10 minutes 2021-10-15 15:58:52 +01:00
Nick Craig-Wood
9ce0df3242 dropbox: add --dropbox-batch-commit-timeout to control batch timeout
This also adds a Debug log message showing how long each batch took

See: #5491
2021-10-15 15:32:40 +01:00
Nick Craig-Wood
f4c5f1f185 box: retry operation_blocked_temporary errors #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
825f7826f5 box: add --box-owned-by to only show items owned by the login passed #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
34140b2f57 box: delete items in parallel in cleanup using --checkers threads #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
e18ae5da09 box: factor directory listing and cleanup listing into one function #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
b61912b4c8 box: add --box-list-chunk to control listing chunk size #5545 2021-10-15 15:28:54 +01:00
Nick Craig-Wood
bfecf5301b box: when doing cleanup delete as much as possible - fixes #5545
Before this change the cleanup routine exited on the first deletion
error.

This change counts any errors on deletion and exits when the iteration
is complete with an error showing the number of deletion failures.
Deletion failures will be logged.
2021-10-15 15:28:54 +01:00
Nick Craig-Wood
308323e9c4 box: make listings of heavily used directories more reliable #5545
Before this change we used limit/offset paging for directories in the
main directory listing routine and in the trash cleanup listing.

This switches to the new scheme of limit/marker which is more reliable
on a directory which is continuously changing. It has the disadvantage
that it doesn't tell us the total number of items available; however,
rclone wasn't using that information.
2021-10-15 15:28:54 +01:00
Nick Craig-Wood
fc5d6c16b6 serve ftp: ensure modtime is passed as UTC always to fix timezone oddities
See: https://forum.rclone.org/t/ftp-server-reports-file-timestamps-in-utc/26274
2021-10-15 15:25:51 +01:00
Nick Craig-Wood
c821fbeddf drive: add -o config option to backend drives to config for all shared drives
See: https://forum.rclone.org/t/bulk-create-remotes-to-existing-google-shared-drives/26837/
2021-10-15 15:22:14 +01:00
Nick Craig-Wood
93d85015af sftp: fix timeout when doing MD5SUM of large file
Before this change we were timing out MD5SUMs after 1 minute because
rclone was closing the SSH session when there were sessions still
active.

This change now counts active sessions for all SSH operations (Upload,
Download, Hashes and running commands).

See: https://forum.rclone.org/t/while-rclone-copying-large-files-md5sum-failed-with-exit-status/26845/
2021-10-15 15:19:22 +01:00
Nick Craig-Wood
a98e3ea6f1 build: replace the deprecated golint linter with revive
This fixes up a small number of new lint items also
2021-10-15 12:51:31 +01:00
Nick Craig-Wood
167406bc68 build: switch to using the golangci-lint action for better error reporting
The action reports errors to users in their pull requests which is
much easier to understand.
2021-10-15 12:50:22 +01:00
Nick Craig-Wood
036abde393 build: fix indentation in build.yml 2021-10-15 12:50:22 +01:00
Nick Craig-Wood
edf8978d15 operations: fix HashSum tests after removing ERROR and UNSUPPORTED
This was caused by

7a1cab57b6 cmd/hashsum: dont put ERROR or UNSUPPORTED in output

And was picked up in the integration tests.

This patch no longer calls the HashLister for unsupported hash types.
2021-10-15 10:51:08 +01:00
Nick Craig-Wood
f529c02446 lsjson: add --stat flag and operations/stat api
This enables information about single files to be efficiently
retrieved.
2021-10-14 17:15:50 +01:00
Nick Craig-Wood
3fbaa4c0b0 backends: make NewObject return fs.ErrorIsDir if possible
This changes the interface to NewObject so that if NewObject is called
on a directory then it should return fs.ErrorIsDir if possible without
doing any extra work, otherwise fs.ErrorObjectNotFound.

Tested on integration test server with:

go run integration-test.go -tests backend -run TestIntegration/FsMkdir/FsPutFiles/FsNewObjectDir -branch fix-stat -maxtries 1
2021-10-14 17:15:50 +01:00
Nick Craig-Wood
af732c5431 Add Logeshwaran to contributors 2021-10-14 17:15:48 +01:00
Nick Craig-Wood
14de0cfb43 Add Joda Stößer to contributors 2021-10-14 17:14:53 +01:00
albertony
c2597a4fa3 docs: cleanup header levels in backend docs (#5698) 2021-10-14 15:40:18 +02:00
Logeshwaran
ceaafe6620 s3: add support to use CDN URL to download the file
The egress charges when using a CloudFront CDN URL are lower than when
accessing the file directly from S3. So this adds a download URL
advanced option which, when set, is used to download the file.
2021-10-14 11:19:38 +01:00
Joda Stößer
d41b9b46d4 docs: improve ordered list prefix for drive.md 2021-10-14 11:08:15 +01:00
Joda Stößer
98d9ba363f .github: correct contribution link in pull request template 2021-10-14 11:07:25 +01:00
Ivan Andreev
16fb608bee hashsum: treat hash values in sum file as case insensitive
Also warn about duplicate file paths in sum files.

Fixes https://forum.rclone.org/t/rclone-check-sum/25566/45
2021-10-13 18:21:34 +03:00
Ivan Andreev
cf9b82b8db chunker: md5all must create metadata if base hash is slow
Before this patch the md5all option would skip creating metadata with the
hashsum if the base filesystem provided md5, in the hope of passing it through.
However, if the base hash is slow (for example on the local fs), chunker passed
the slow md5 but never reported this fact in its features.

This patch makes chunker snapshot the base hashsum in metadata when md5all is
set and the base hashsum is slow, since chunker was intended to provide only
instant hashsums from the start.

Fixes #5508
2021-10-13 16:18:08 +03:00
albertony
7d66bfbb7c docs: toc styling (#5695) 2021-10-13 15:04:11 +02:00
Nolan Woods
023e32de05 lib/http: Factor password hash salt into options with default 2021-10-13 11:33:38 +01:00
Nolan Woods
b1cb41f8da lib/http: Fix bad username check in single auth secret provider 2021-10-13 11:33:38 +01:00
Nick Craig-Wood
1cb31e8cc7 crypt: fix uploads with --crypt-no-data-encryption
Before this change, when uploading to a crypt, the ObjectInfo
accidentally used the encrypted size, not the unencrypted size when
--crypt-no-data-encryption was set.

Fixes #5498
2021-10-12 17:12:41 +01:00
Ivan Andreev
1e7db7193e docs: note minimum supported docker engine 2021-10-12 13:27:20 +03:00
Ivan Andreev
7190c058a7 crypt: return wrapped object even with no-data-encryption
In the presence of no_data_encryption, Crypt's Put method used to over-optimize
and return the base object. This patch makes it return a Crypt-wrapped object.

Fixes #5498
2021-10-12 00:41:12 +03:00
albertony
85074f8f88 librclone: add RcloneFreeString function
See PR #5703

Based on initial work by Weng Haoyu (@wengh) in PR #5362
2021-10-11 19:10:07 +02:00
albertony
c7329d2ece docs: add section in install documentation about portable install
See #5591
2021-10-11 15:08:35 +02:00
albertony
f3e71f129c config: convert --cache-dir value to an absolute path 2021-10-11 15:08:35 +02:00
albertony
0ffdca42d5 docs: document --cache-dir flag 2021-10-11 15:08:35 +02:00
albertony
dbb6f94d95 config: make temporary directory user-configurable
See #5591
2021-10-11 15:08:35 +02:00
albertony
352f9bcd47 config: add paths command to show configured paths
See #5591
2021-10-11 15:08:35 +02:00
Nick Craig-Wood
d8886b37a6 serve sftp: update docs on host key generation 2021-10-11 10:43:16 +01:00
albertony
894a5a1a83 serve sftp: fix generation of server keys on windows 2021-10-11 10:43:16 +01:00
albertony
ada6a92c8b serve sftp: generate an Ed25519 server key as well as ECDSA and RSA 2021-10-11 10:43:16 +01:00
Nick Craig-Wood
df0b7d8eab serve sftp: generate an ECDSA server key as well as RSA
Before this fix, rclone only generated an RSA server key when the user
didn't supply a key.

However the RSA server key is being deprecated as it is now insecure.

This patch generates an ECDSA server key too which will be used in
preference over the RSA key, but the RSA key will carry on working.

Fixes #5671
2021-10-11 10:43:16 +01:00
Nick Craig-Wood
0dfffc0ed4 Add YenForYang to contributors 2021-10-11 10:43:16 +01:00
Alfonso Montero
19fc1b2a95 docs/compress: minor improvements 2021-10-09 18:22:38 +02:00
Ivan Andreev
bce395385d mount/docs: improve wording 2021-10-09 18:53:57 +03:00
albertony
a5b8fcc127 docs: align dropdown items when icons have different sizes 2021-10-09 18:49:05 +03:00
YenForYang
269f90c1e4 drive: Fix buffering for single request upload for files smaller than --drive-upload-cutoff
I discovered that `rclone` always uploads in chunks of 16MiB whenever
uploading a file smaller than `--drive-upload-cutoff`. This is
undesirable since the purpose of the flag `--drive-upload-cutoff` is
to *prevent* chunking below a certain file size.

I realized that it wasn't `rclone` forcing the 16MiB chunks. The
`google-api-go-client` forces a chunk size default of
[`googleapi.DefaultUploadChunkSize`](32bf29c2e1/googleapi/googleapi.go (L55-L57))
bytes for resumable type uploads. This means that all requests that
use `*drive.Service` directly for upload without specifying a
`googleapi.ChunkSize` will be forced to use a *`resumable`*
`uploadType` (rather than `multipart`) for files less than
`googleapi.DefaultUploadChunkSize`. This is also noted directly in the
Drive API client documentation [here](https://pkg.go.dev/google.golang.org/api/drive/v3@v0.44.0#FilesUpdateCall.Media).

This fixes the problem by passing `googleapi.ChunkSize(0)` to
`Media()` method calls, which is the only way to disable chunking
completely. This is mentioned in the API docs
[here](https://pkg.go.dev/google.golang.org/api/googleapi@v0.44.0#ChunkSize).

The other alternative would be to pass
`googleapi.ChunkSize(f.opt.ChunkSize)` -- however, I'm *strongly* in
favor of *not* doing this for performance reasons. By not explicitly
passing a `googleapi.ChunkSize(0)`, we effectively allow
[`PrepareUpload()`](https://pkg.go.dev/google.golang.org/api/internal/gensupport@v0.44.0#PrepareUpload)
to create a
[`NewMediaBuffer`](https://pkg.go.dev/google.golang.org/api/internal/gensupport@v0.44.0#NewMediaBuffer)
that copies the original `io.Reader` passed to `Media()` in order to
check that its size is less than `ChunkSize`, which will unnecessarily
consume time and memory.

`minChunkSize` is also changed to be `googleapi.MinUploadChunkSize`,
as it is an externally specified value that we have no control over.
2021-10-08 15:29:38 +01:00
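A sketch of the core idea using the Drive v3 Go client: passing googleapi.ChunkSize(0) to Media disables the resumable-upload buffering, so small files go up in a single multipart request. The function and file names are placeholders and service construction is omitted.

    package example

    import (
        "context"
        "os"

        "google.golang.org/api/drive/v3"
        "google.golang.org/api/googleapi"
    )

    func uploadSmall(ctx context.Context, srv *drive.Service, path string) error {
        f, err := os.Open(path)
        if err != nil {
            return err
        }
        defer f.Close()

        // ChunkSize(0) turns off chunked/resumable uploads entirely, instead of
        // the library falling back to its default 16 MiB chunks.
        _, err = srv.Files.
            Create(&drive.File{Name: "example.txt"}).
            Media(f, googleapi.ChunkSize(0)).
            Context(ctx).
            Do()
        return err
    }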
Ivan Andreev
7a1cab57b6 cmd/hashsum: dont put ERROR or UNSUPPORTED in output 2021-10-08 14:26:27 +03:00
Ankur Gupta
c8d5606f2c Removed multiple emails for Ankur Gupta 2021-10-08 10:25:42 +01:00
Ivan Andreev
a2545066e2 drive: constrain list by filter #5023
Google Drive API allows for clauses like "modifiedTime > '2012-06-04T12:00:00'"
in the query param, so the filter flags --max-age and --min-age can be applied
directly at the directory listing phase rather than in a filter.
This is extremely helpful when we want to do an incremental backup of a remote
drive with many files but the number of recently changed file is small.

Co-authored-by: fotile96 <fotile96@users.noreply.github.com>
2021-10-07 22:11:22 +03:00
Ivan Andreev
729704bcb8 serve/docker: fix octal umask 2021-10-07 22:02:27 +03:00
Nick Craig-Wood
8b4a89d34b Update github.com/ncw/swift to v2.0.1 2021-10-07 12:02:09 +01:00
Ivan Andreev
15a9816512 ftp: update encoding in integration tests with ProFtpd, PureFtpd, VsFtpd
PR #5589 established recommended encodings to use with major FTP servers.
This patch updates integration tests correspondingly.
2021-10-05 21:45:08 +03:00
Ivan Andreev
cace18d89a docs/ftp: state clearly that active mode is not supported 2021-10-05 15:52:50 +03:00
Ivan Andreev
a065fb23e5 mount: document the mount helper mode, make command docs - #5594 2021-10-03 01:44:08 +03:00
Ivan Andreev
a95c7a001e core: run rclone as mount helper - #5594 2021-10-03 01:44:08 +03:00
Ivan Andreev
ffa1b1a258 config: enable verbose logging by the --verbose argument - #5594 2021-10-03 01:44:08 +03:00
Ivan Andreev
8b8a943dd8 mountlib: correctly daemonize for compatibility with automount - #5593
This patch will:
- add --daemon-wait flag to control the time to wait for background mount
- remove dependency on sevlyar/go-daemon and implement backgrounding directly
- avoid setsid during backgrounding as it can result in race under Automount
- provide a fallback PATH to correctly run `fusermount` under systemd as it
  runs mount units without standard environment variables
- correctly handle ^C pressed while the background process is being set up
2021-10-02 23:45:16 +03:00
Ivan Andreev
8c10dee510 mountlib: use procfs to validate mount on linux - #5593
The current way of checking whether the mountpoint has already been mounted
(a directory listing) can result in a race if rclone runs under Automount
(classic or systemd).

This patch adopts Linux ProcFS for the check. Note that the mountpoint is considered
empty if it's tagged as "mounted" by autofs. ProcFS is also used to check whether
the rclone mount was successful (i.e. tagged by a string containing "rclone").

On macOS/BSD where ProcFS is unavailable the old method is still used.

This patch also moves a few utility functions unchanged to utils.go:
CheckOverlap, CheckAllowings, SetVolumeName.
2021-10-02 23:45:16 +03:00
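A rough Linux-only sketch of checking a mountpoint via /proc/self/mountinfo (field 5 of each line is the mount point; the helper name is made up and escaping of special characters is ignored):

    package example

    import (
        "bufio"
        "os"
        "strings"
    )

    // isMounted reports whether path appears as a mount point in
    // /proc/self/mountinfo.
    func isMounted(path string) (bool, error) {
        f, err := os.Open("/proc/self/mountinfo")
        if err != nil {
            return false, err
        }
        defer f.Close()

        sc := bufio.NewScanner(f)
        for sc.Scan() {
            fields := strings.Fields(sc.Text())
            if len(fields) > 4 && fields[4] == path {
                return true, nil
            }
        }
        return false, sc.Err()
    }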
Ivan Andreev
68be24c88d log: optionally print pid in logs - #5593
This option is useful to troubleshoot `rclone mount --daemon`
2021-10-02 23:45:16 +03:00
albertony
fbc7f2e61b lib/file: improve error message when attempting to create dir on nonexistent drive on windows
This replaces built-in os.MkdirAll with a patched version that stops the recursion
when reaching the volume part of the path. The original version would continue recursion,
and for extended length paths end up with \\? as the top-level directory, and the error
message would then be something like:
mkdir \\?: The filename, directory name, or volume label syntax is incorrect.
2021-10-01 23:18:39 +02:00
Nolan Woods
b30731c9d0 lib/http: Add auth to http service
Fixes https://github.com/rclone/rclone/issues/5620
2021-10-01 15:51:48 +01:00
albertony
26b6c83e49 docs: extend documentation on valid remote names 2021-10-01 15:18:04 +02:00
albertony
59c74ea1b8 config: support hyphen in remote name from environment variable 2021-10-01 15:18:04 +02:00
Ivan Andreev
2d05b28b0a ftp: enable CI for ProFtpd, PureFtpd, VsFtpd 2021-10-01 10:09:57 +03:00
Ivan Andreev
dc589d3070 ftp: provide valid encoding for ProFtpd, PureFtpd, VsFtpd 2021-10-01 10:09:57 +03:00
Ivan Andreev
48e7246163 lib/encoder: add encoding of square brackets 2021-10-01 10:09:57 +03:00
Ivan Andreev
69f4b48719 ftp: fix deadlock after failed update when concurrency=1 2021-10-01 10:03:59 +03:00
Nick Craig-Wood
bb0c4ad2d8 union: fix rename not working with union of local disk and bucket based remote
Before this change the union's feature flags were a strict AND of the
underlying remotes. This means that a union of a local disk (which can
Move but not Copy) and a bucket based remote (which can Copy but not
Move) could neither Move nor Copy.

This fix advertises Move in the union if all the remotes can Move or
Copy. It also implements Move as Copy+Delete (like rclone does
normally) if the underlying union does not support Move.

This enables renames to work with unions of local disk and bucket
based remotes as expected.

Fixes #5632
2021-09-30 20:09:02 +01:00
albertony
b389b84685 jottacloud: refactor all file state checks into common functions 2021-09-30 19:34:48 +02:00
albertony
b0f06d9920 jottacloud: improved error handling with SetModTime and corrupt files in general 2021-09-30 19:34:48 +02:00
albertony
159229527d jottacloud: implement SetModTime to support modtime-only changes - #5627 2021-09-30 19:34:48 +02:00
albertony
b5a27b1c75 docs: cleanup header levels 2021-09-30 17:54:57 +02:00
albertony
db7db952c1 Add Jonta to contributors 2021-09-30 15:38:24 +02:00
Jonta
d8d621c175 docs: grammar/readability (#5633) 2021-09-30 15:34:00 +02:00
Nick Craig-Wood
0902e5c48e vfs: Ignore ECLOSED in Setattr when truncating file handles
Before this change file handles could get closed while the truncate
the file handles loop was running.

This would mean that occasionally an ECLOSED (which is translated into
EBADF by cmd/mount) would spuriously be returned if Release happened
to occur in the middle of a Truncate call (Setattr called with
size=0).

This change ignores the ECLOSED while truncating file handles.

See: https://forum.rclone.org/t/writes-to-wasabi-mount-failing-with-bad-file-descriptor-intermittently/26321
2021-09-28 11:51:41 +01:00
Nick Craig-Wood
5b6bcfc184 Add HNGamingUK to contributors 2021-09-28 11:51:41 +01:00
HNGamingUK
1409b89f6c swift: document OVH Cloud Archive - fixes #3041
Added documentation for OVH Cloud Archive, which provides information on how to restore/unfreeze/download objects.
2021-09-20 17:32:13 +01:00
Fred
00c6642fad seafile: fix 2fa state machine 2021-09-18 12:44:59 +01:00
Nick Craig-Wood
badefdb060 pcloud: try harder to delete a failed upload
This fixes the integration tests when testing errored uploads
2021-09-17 10:43:45 +01:00
Nick Craig-Wood
9c2533821d pcloud: return an early error when Put is called with an unknown size
This stops the 10 minute pause in the integration tests
2021-09-17 10:43:45 +01:00
Nick Craig-Wood
c718fe4330 pcloud: fix sha256 hashes #5496
This was started in

3626f10f26 pcloud: add sha256 support - fixes #5496

But this support turned out to be incomplete and caused the
integration tests to fail.
2021-09-17 10:43:45 +01:00
Nick Craig-Wood
3298493b0b Add wzl to contributors 2021-09-17 10:43:45 +01:00
Abhinav Sharma
18f3929186 docs: update ignored email as per #5586 2021-09-12 18:46:37 +01:00
wzl
b35db61a80 docs: add a step for drive.md 2021-09-11 23:00:59 +03:00
Abhinav Sharma
3c17762c4e update the email 2021-09-11 13:08:29 +03:00
Ivan Andreev
24de896df2 build: apply gofmt from golang 1.17 2021-09-09 20:43:59 +03:00
Ivan Andreev
2bc2546d5c test: skip mount2 test on single-CPU runners 2021-09-06 15:01:44 +03:00
Tatsuya Noyori
05f128868f azureblob: add --azureblob-no-head-object 2021-09-06 10:41:54 +01:00
x0b
f7f4468cbc build: update Go to 1.16 and NDK to 22b for android/any 2021-09-03 13:32:48 +03:00
Ivan Andreev
aa0ceb6c5c cmd/version: add support for openbsd/386
After this patch the version command will be fully
supported on openbsd/amd64 and openbsd/386.
Remaining os/arch combinations stay as is.
2021-09-02 11:13:12 +03:00
albertony
f1f923a986 Change byte unit format from MiByte to MiB 2021-08-31 09:57:27 +02:00
albertony
8500d95579 test: consider global option for printing human-readable sizes and avoid unsigned integer overflow 2021-08-31 09:57:27 +02:00
albertony
8c4b06da79 tree: option to print human-readable sizes removed in favor of global option 2021-08-31 09:57:27 +02:00
albertony
6d25ba7c02 about: make human-readable output more consistent with other commands 2021-08-31 09:57:27 +02:00
albertony
774efeabf0 ncdu: introduce key u to toggle human-readable 2021-08-31 09:57:27 +02:00
albertony
d24f87c6a9 size: include human-readable count 2021-08-31 09:57:27 +02:00
albertony
721a9786a7 ls: introduce a global option to print human-readable sizes and consider it for ls commands
Fixes #1890
2021-08-31 09:57:27 +02:00
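A short sketch of the global option in use (the remote name is a placeholder, and the flag spelling --human-readable is assumed); sizes are printed with suffixes such as Ki, Mi and Gi instead of raw byte counts:

    rclone ls remote:backup --human-readable
    rclone size remote:backup --human-readable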
albertony
94521959f8 docs/config: remove use of backticks around words within a larger code block 2021-08-31 09:02:31 +02:00
Nick Craig-Wood
6a9ef27b09 cache: don't run failing tests on windows/386
After updating rclone's dependencies these tests started failing on
windows/386

- TestInternalDoubleWrittenContentMatches
- TestInternalMaxChunkSizeRespected

The failures look like this. The root cause is unknown. The `Wait(n=1)
would exceed context deadline` errors come from golang.org/x/time/rate
but it isn't clear what is calling them.

2021/08/20 21:57:16 ERROR : worker-0 <one>: object open failed 0: rate: Wait(n=1) would exceed context deadline
[snip ~10 duplicates]
2021/08/20 21:57:56 ERROR : tidwcm1629496636/one: (0/26) error (chunk not found 0) response
2021/08/20 21:58:02 ERROR : worker-0 <one>: object open failed 0: rate: Wait(n=1) would exceed context deadline
--- FAIL: TestInternalDoubleWrittenContentMatches (45.77s)
    cache_internal_test.go:310:
        	Error Trace:	cache_internal_test.go:310
        	Error:      	Not equal:
        	            	expected: "one content updated double"
        	            	actual  : ""

        	            	Diff:
        	            	--- Expected
        	            	+++ Actual
        	            	@@ -1 +1 @@
        	            	-one content updated double
        	            	+
        	Test:       	TestInternalDoubleWrittenContentMatches
2021/08/20 21:58:03 original size: 23592960
2021/08/20 21:58:03 updated size: 12
2021-08-20 23:28:18 +01:00
Nick Craig-Wood
09fd258b5c build: update all dependencies 2021-08-20 22:03:38 +01:00
Nick Craig-Wood
2cefae51a1 build: make go1.14 the minimum supported Go for building 2021-08-20 19:04:29 +01:00
Nick Craig-Wood
e4fb5e99ef build: use go1.17 for building 2021-08-20 19:04:29 +01:00
Nick Craig-Wood
8bd26c663a build: update golang.org/x/sys for go1.17 build 2021-08-20 19:04:29 +01:00
Nick Craig-Wood
dd97fbc55f Add Parth Shukla to contributors 2021-08-20 19:04:23 +01:00
Nick Craig-Wood
b32d00ba37 Add Justin Hellings to contributors 2021-08-20 19:04:23 +01:00
albertony
3a2f748aeb vfs: ensure names used in cache path are legal on current os
Fixes #5360
2021-08-19 20:14:50 +02:00
albertony
18be4ad10d vfs: fix issue where empty dirs would build up in cache meta dir 2021-08-19 20:14:50 +02:00
albertony
9a2811f0b2 local: refactor default os encoding out from local backend into shared encoder lib 2021-08-19 20:14:50 +02:00
albertony
63708d73be docs/vfs: Merge duplicate chunked reading documentation from mount docs 2021-08-19 19:29:41 +02:00
Parth Shukla
60323dc5e2 googlephotos: Use encoder for album names 2021-08-19 16:38:31 +01:00
Justin Hellings
359648e002 docs: Removed ambiguity from copy command docs
Switched from talking about "unchanged" files to "identical" files.

I found out the hard way that rclone copy will overwrite newer files.
Looking at posts in the rclone forum, this is a common experience.

The docs for copy have referred to "unchanged" files.
This is ambiguous because it intuitively introduces a sense
of chronology, but chronology is irrelevant.
Rclone only "cares" about difference, not change.
2021-08-19 16:34:57 +01:00
Ivan Andreev
e45c23ab79 cmd/version: add support for openbsd/amd64
After this patch the version command will:
- be fully supported on openbsd/amd64
- remain a stub on openbsd/i386 until we deprecate go 1.17
Remaining os/arch combinations stay as is.
2021-08-16 11:39:34 +03:00
Nick Craig-Wood
890b6a45b5 sugarsync: fix initial connection after config re-arrangement - Fixes #5525
In this commit the config system was re-arranged

    94dbfa4ea fs: change Config callback into state based callback #3455

This passed the password as a temporary config parameter but forgot to
reveal it in the API call.
2021-08-14 12:53:36 +01:00
Nick Craig-Wood
227119da16 Add Ken Enrique Morel to contributors 2021-08-14 12:53:36 +01:00
Ken Enrique Morel
3626f10f26 pcloud: add sha256 support - fixes #5496 2021-08-14 12:48:25 +01:00
negative0
82ad9a30b9 rc: fix speed does not update in core/stats 2021-08-14 12:45:51 +01:00
Ivan Andreev
448a03181f cmd/mount: --fast-list does nothing on a mount 2021-08-13 21:11:56 +03:00
Ivan Andreev
3615619645 serve/docker: retry saveState to fix sporadic test failure on macOS/Windows 2021-08-13 21:00:21 +03:00
Nick Craig-Wood
33ddd540b6 accounting: fix maximum bwlimit by scaling up max token bucket size
Before this fix, on Windows, the --bwlimit would max out at 2.5Gbps
even when set to 10 Gbps.

This turned out to be because of the maximum token bucket size.

This fix scales up the token bucket size linearly above a bwlimit of
2Gbps.

Fixes #5507
2021-08-13 16:55:24 +01:00
Nick Craig-Wood
a5f277f47e vfs: fix crash when truncating a just uploaded object - Fixes #5522 2021-08-11 11:55:09 +01:00
Nick Craig-Wood
bec253fd39 Add vinibali to contributors 2021-08-11 11:55:09 +01:00
Nick Craig-Wood
815a6ac8aa Add hota to contributors 2021-08-11 11:55:09 +01:00
Ivan Andreev
8106f65e0b Add yedamo to contributors 2021-08-11 11:07:13 +03:00
yedamo
96f77ebe5a selfupdate: fix --quiet option, not quite quiet
Fixes #5505
2021-08-11 10:14:19 +03:00
Greg Sadetsky
36f0231082 docs/drive: Fix lsf example without drive-impersonate (#5504) 2021-08-10 21:59:36 +02:00
albertony
168cb65c61 Add Greg Sadetsky to contributors 2021-08-10 21:50:26 +02:00
Greg Sadetsky
e00db968aa docs/s3: fix typo in s3 documentation (#5515) 2021-08-10 21:45:49 +02:00
partev
bb6b44d199 DOC: "OS X" -> "macOS" 2021-08-10 10:12:30 +03:00
vinibali
88b35bc32d Update yandex.md
add mail subscription exception
2021-08-09 23:28:41 +03:00
Nathan Collins
c32d5dd1f3 fs: move with --ignore-existing will not delete skipped files - #5463 2021-08-01 17:46:45 +01:00
Greg Sadetsky
3d9da896d2 drive: fix instructions for auto config #5499 2021-08-01 15:17:07 +01:00
hota
839c20bb35 s3: add Wasabi's AP-Northeast endpoint info
* Wasabi has started to provide an AP Northeast (Tokyo) endpoint for all customers, so add it to the list

Signed-off-by: lindwurm <lindwurm.q@gmail.com>
2021-08-01 14:56:52 +01:00
Nick Craig-Wood
7c58148840 Start v1.57.0-DEV development 2021-08-01 13:43:36 +01:00
Nick Craig-Wood
6545755758 sftp: remove spurious error message on --sftp-disable-concurrent-reads 2021-07-31 11:04:45 +01:00
Nick Craig-Wood
c86a55c798 vfs: fix duplicates on rename - fixes #5469
Before this change, if there was an existing file being uploaded when
a file was renamed on top of it, then both would be uploaded. This
causes a duplicate in Google Drive as both files get uploaded at the
same time. This was triggered reliably by LibreOffice saving doc
files.

This fix removes any duplicates in the upload queue on rename.
2021-07-30 19:31:02 +01:00
Nick Craig-Wood
1d280081d4 Add Mariano Absatz (git) to contributors 2021-07-30 19:31:02 +01:00
Nick Craig-Wood
f48cb5985f Add Justin Winokur (Jwink3101) to contributors 2021-07-30 19:31:02 +01:00
Ivan Andreev
55e766f4e8 mountlib: restore daemon mode after #5415 2021-07-29 13:35:04 +03:00
Alex Chen
63a24255f8 onedrive: handle HTTP 400 better in PublicLink() (#5419) 2021-07-27 17:55:57 +08:00
Cnly
bc74f0621e http: fix serve http exits directly after starting 2021-07-25 14:06:43 +01:00
Mariano Absatz (git)
f39a08c9d7 clarification of the process for creating custom client_id 2021-07-24 09:19:48 +03:00
Justin Winokur (Jwink3101)
675548070d fs/operations: add rmdirs -v output - fixes #5464 2021-07-24 09:16:23 +03:00
Nick Craig-Wood
37ff05a5fa Version v1.56.0 2021-07-20 19:45:41 +01:00
Nick Craig-Wood
c67c1ab4ee test makefiles: fix documentation so it doesn't have HTML in 2021-07-20 19:37:09 +01:00
Nick Craig-Wood
76f8095bc5 hdfs: fix documentation so it doesn't have HTML in 2021-07-20 19:36:30 +01:00
Nick Craig-Wood
f646cd0a2a librclone: add missing sync/* rc methods
See: https://forum.rclone.org/t/missing-directory-copy-move-methods-in-librclone/24503
2021-07-20 16:59:02 +01:00
Nick Craig-Wood
d38f6bb0ab gphotos: fix read only scope not being used properly
Before this change the read only scope was being ignored and rclone
was asking for a read-write scope.

https://forum.rclone.org/t/google-photos-copy-sync-errors/25153
2021-07-20 16:57:55 +01:00
Nick Craig-Wood
11d86c74b2 docs: expand contents and make docs full screen 2021-07-20 16:53:21 +01:00
Nick Craig-Wood
feb6046a8a docs: add table of contents to every page 2021-07-20 16:53:21 +01:00
Nick Craig-Wood
807102ada2 drive: fix config system overwriting team drive ID - fixes #5454 2021-07-20 16:51:59 +01:00
Nick Craig-Wood
770b3496a1 config: fix in memory config not saving on the fly backend config
Before this fix, saving a :backend config gave the error

    Can't save config "token" = "XXX" for on the fly backend ":backend"

Even when using the in-memory config `--config ""`

This fixes the problem by
- always using the in memory config if it is configured
- moving the check for a :backend config save to the file config backend

It also removes the contents of the config items being saved from the
log, which prevents confidential tokens from being logged.

Fixes #5451
2021-07-20 12:09:38 +01:00
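A hedged illustration of combining the in-memory config with an on-the-fly backend; the backend parameters and URL are placeholders:

    # With --config "" nothing is written to disk, and the :http backend
    # is configured entirely on the command line via a connection string.
    rclone lsf ":http,url='https://example.com':" --config ""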
buengese
da36ce08e4 docs/jottacloud: add short note on how no versions option works 2021-07-15 17:29:30 +02:00
buengese
8652cfe575 jottacloud: add no versions option 2021-07-15 17:29:30 +02:00
Nick Craig-Wood
94b1439299 drive: fix some google docs being treated as files - fixes #5455
At some point some google docs files started having sizes returned in
their listing information.

This then caused rclone to treat the docs as files which caused
downloads to fail.

The API docs now state that google docs may have sizes (whereas I'm
pretty sure it didn't earlier).

This fix removes the check for size, so google docs are identified
solely by not having an MD5 checksum.
2021-07-14 11:40:58 +01:00
Nick Craig-Wood
97c9e55ddb Add Antoine GIRARD to contributors 2021-07-14 11:40:57 +01:00
Ivan Andreev
c0b2832509 docs: serve docker: fix URL of systemd contrib files (#5415) 2021-07-11 13:23:00 +03:00
Ivan Andreev
7436768d62 docs for serve docker and docker plugin (#5415) 2021-07-10 23:56:09 +03:00
Ivan Andreev
55153403aa build docker plugin (#5415) 2021-07-10 23:56:09 +03:00
Antoine GIRARD
daf449b5f2 cmd/serve: add serve docker command (#5415)
Fixes #4750

Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
2021-07-10 23:56:09 +03:00
Antoine GIRARD
221dfc3882 mountlib: refactor before adding serve docker (#5415)
Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
2021-07-10 23:56:09 +03:00
Nick Craig-Wood
aab29353d1 Update email address for Serge Pouliquen 2021-07-08 12:49:13 +01:00
Nick Craig-Wood
c24504b793 Add Chuan Zh to contributors 2021-07-08 12:47:35 +01:00
Nick Craig-Wood
6338d0026e Add Michael Hanselmann to contributors 2021-07-08 12:47:35 +01:00
Chuan Zh
ba836d45ff s3: update Alibaba OSS endpoints 2021-07-08 12:03:04 +01:00
Ole Frost
367cf984af docs: added tip to reduce SharePoint throttling - fixes #5404 2021-07-08 11:39:52 +01:00
Michael Hanselmann
6b7d7d0441 atexit: Terminate with non-zero status after receiving signal
When rclone received a SIGINT (Ctrl+C) or SIGTERM signal while an atexit
function is registered it always terminated with status code 0. Unix
convention is to exit with a non-zero status code. Often it's
`128 + int(signum), but at least not zero.

With this change fatal signals handled by the `atexit` package cause
a non-zero exit code. On Unix systems it's `128 + int(signum)` while
on other systems, such as Windows, it's always 2 ("error not otherwise
categorised").

Resolves #5437.

Signed-off-by: Michael Hanselmann <public@hansmi.ch>
2021-07-07 17:59:26 +01:00
Michael Hanselmann
cf19073ac9 cmd: Move exit status codes to separate package
Signal handling by the `atexit` package needs access to
`exitCodeUncategorizedError`. With this change all exit status values
are moved to a dedicated package so that they can be reused.

Signed-off-by: Michael Hanselmann <public@hansmi.ch>
2021-07-07 17:59:26 +01:00
Nick Craig-Wood
ba5c559fec fs/sync: fix tests by only --compare-dest timestamp if have hash
This fixes the integration test errors introduced in #5410
2021-07-07 16:59:51 +01:00
Nick Craig-Wood
abb8fe8ba1 Add Haochen Tong to contributors 2021-07-07 16:59:51 +01:00
Nick Craig-Wood
765af387e6 Add Dmitry Sitnikov to contributors 2021-07-07 16:59:51 +01:00
Nick Craig-Wood
d05cf6aba8 Add partev to contributors 2021-07-07 16:59:51 +01:00
Nick Craig-Wood
76a3fef24d Add Xuanchen Wu to contributors 2021-07-07 16:59:51 +01:00
Ivan Andreev
b40d9bd4c4 cmd: add hashSUM file support (#5352)
Currently rclone check supports matching two file trees by sizes and hashes.
This change adds support for SUM files produced by GNU utilities like sha1sum.

Fixes #1005 

Note: checksum by default checks, hashsum by default prints sums.
The new flag is named "--checkfile" but carries the hash name.
Summary of introduced command forms:

```
rclone check sums.sha1 remote:path --checkfile sha1
rclone checksum sha1 sums.sha1 remote:path             
rclone hashsum sha1 remote:path --checkfile sums.sha1
rclone sha1sum remote:path --checkfile sums.sha1
rclone md5sum remote:path --checkfile sums.md5
```
2021-07-07 18:34:16 +03:00
Ivan Andreev
4680c0776d backend/local: skip entries removed concurrently with List() (#5297)
This change fixes the bug described below:
if a file is removed while the local backend List() runs,
the call will flag an accounting error.
The bug manifests itself if the local backend is the Sync target,
due to intrinsic concurrency.
The odds of hitting this bug depend on --checkers and --transfers.
Chunker over the local backend is affected even more because
updating a composite object with smaller content
translates into removing chunks on the underlying file system
and involves a number of List() calls.
2021-07-07 16:50:19 +03:00
buengese
fb305b5976 fichier: check that we actually got a download token and retry if we didn't 2021-07-06 14:58:50 +02:00
Ole Frost
5e91b93e59 cmdtest: end-to-end test for commands, flags and environment variables
There was no easy way to automatically test the end-to-end functionality
of commands, flags, environment variables etc.

The need for end-to-end testing was highlighted by the issues fixed
in #5341. There was no automated test to continually verify current
behaviour, nor a framework to quickly test the correctness of the fixes.

This change adds an end-to-end testing framework in the cmdtest folder.
It has some simple examples in func TestCmdTest in cmdtest_test.go. The
tests should be readable by anybody familiar with rclone and look like
this:

    // Test the rclone version command with debug logging (-vv)
    out, err = rclone("version", "-vv")
    if assert.NoError(t, err) {
        assert.Contains(t, out, "rclone v")
        assert.Contains(t, out, "os/version:")
        assert.Contains(t, out, " DEBUG : ")
    }

The end-to-end tests are executed just like the Go unit tests, that is:

    go test ./cmdtest -v

The change also contains a thorough test of environment variables in
environment_test.go.

Thanks to @ncw for encouragement and introduction to the TestMain trick.
2021-07-05 16:38:20 +01:00
Ole Frost
58c99427b3 config: fixed issues with flags/options set by environment vars.
Some environment variables didn’t behave like their corresponding
command line flags. The affected flags were --stats, --log-level,
--separator, --multi-thread-streams, --rc-addr, --rc-user and --rc-pass.
Example:

    RCLONE_STATS='10s'
    rclone check remote: remote: --progress
    # Expected: rclone check remote: remote: --progress --stats=10s
    # Actual: rclone check remote: remote: --progress

Remote specific options set by environment variables were overruled by
less specific backend options set by environment variables. Example:

    RCLONE_DRIVE_USE_TRASH='false'
    RCLONE_CONFIG_MYDRIVE_USE_TRASH='true'
    rclone deletefile myDrive:my-test-file
    # Expected: my-test-file is recoverable in the trash folder
    # Actual: my-test-file is permanently deleted (not recoverable)

Backend specific options set by environment variables were overruled by
general backend options set by environment variables. Example:

    RCLONE_SKIP_LINKS='true'
    RCLONE_LOCAL_SKIP_LINKS='false'
    rclone lsd local:
    # Expected result: Warnings when symlinks are skipped
    # Actual result: No warnings when symlinks are skipped
    # That is RCLONE_SKIP_LINKS takes precedence

The above issues have been fixed.

The debug logging (-vv) has been enhanced to show when flags are set by
environment variables.

The documentation has been enhanced with details on the precedence of
configuration options.

See pull request #5341 for more information.
2021-07-05 16:38:20 +01:00
albertony
fee0abf513 docs: add note about use of user and logname environment variables for current username 2021-07-05 16:31:16 +01:00
Nick Gaya
40024990b7 fs/operations: Don't update timestamps of files in --compare-dest 2021-07-05 16:29:44 +01:00
Haochen Tong
04aa6969a4 accounting: calculate rolling average speed 2021-07-05 16:27:33 +01:00
Haochen Tong
d2050523de accounting: fix startTime of statsGroups.sum 2021-07-05 16:27:33 +01:00
Ivan Andreev
1cc6dd349e Add google search widget to rclone.org 2021-07-05 16:21:36 +01:00
Ole Frost
721bae11c3 docs: ease contribution for beginners in Go, Git and GitHub
Improved/added steps to:
 * Install Git with basic setup
 * Use both SSH and HTTPS for the git origin
 * Install Go and verify the GOPATH
 * Update the forked master
 * Find a popular editor for Go
2021-07-05 16:03:53 +01:00
Dmitry Sitnikov
b439199578 azureblob: Fix typo in Azure Blob help
Change the command to create RBAC file to the correct one
`az ad sp create-for-rbac`
Add the link to the command documentation
https://docs.microsoft.com/en-us/cli/azure/ad/sp?view=azure-cli-latest#az_ad_sp_create_for_rbac
2021-07-05 15:58:41 +01:00
partev
0bfd6f793b docs: replace OSX with macOS 2021-07-05 14:51:00 +01:00
Nick Craig-Wood
76ea716abf ftp: make upload error 250 indicate success
Some servers seem to send return code 250 to indicate successful
upload - previously rclone was treating this as an error.

See: https://forum.rclone.org/t/transfer-on-mega-in-ftp-mode-is-not-working/24642/
2021-07-05 10:35:02 +01:00
Alex Chen
e635f4c0be fs: make --dump imply -vv (#5418) 2021-06-23 00:32:26 +08:00
Xuanchen Wu
0cb973f127 onedrive: Make link return direct download link (#5417)
Co-authored-by: Cnly <minecnly@gmail.com>
2021-06-22 21:25:08 +08:00
Alex Chen
96ace599a8 fs: fix logging level mentioned in docs of Logf 2021-06-21 23:30:26 +08:00
Ivan Andreev
80bccacd83 fs: split overgrown fs.go (#5405)
Nothing is added or removed and no package is renamed by this change.
Just rearrange definitions between source files in the fs directory.

New source files:
- types.go      Filesystem types and interfaces
- features.go   Features and optional interfaces
- registry.go   Filesystem registry and backend options
- newfs.go      NewFs and its helpers
- configmap.go  Getters and Setters for ConfigMap
- pacer.go      Pacer with logging and calculator
The final fs.go contains what is left.

Also rename options.go to open_options.go
to dissociate from registry options.
2021-06-14 14:42:49 +03:00
Nick Craig-Wood
3349b055f5 fichier: fix move of files in the same directory
See: https://forum.rclone.org/t/1fichier-rclone-does-not-allow-to-rename-files-and-folders-when-you-mount-a-1fichier-disk-drive/24726/24
2021-06-11 14:21:23 +01:00
Nick Craig-Wood
bef0c23e00 fichier: make error messages report text from the API
See: https://forum.rclone.org/t/1fichier-rclone-does-not-allow-to-rename-files-and-folders-when-you-mount-a-1fichier-disk-drive/24726/24
2021-06-11 14:21:23 +01:00
Nick Craig-Wood
84201ed891 zoho: improve wording for region - fixes #5377 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
04608428bf Add Florian Penzkofer to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
6aaa06d7be Add darrenrhs to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
e53bad5353 Add Reid Buzby to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
f5397246eb Add Chris Lu to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
b8b73f2656 Add database64128 to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
96b67ce0ec Add Tyson Moore to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
e2beeffd76 Add Tom to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
30b949642d Add acsfer to contributors 2021-06-11 14:21:23 +01:00
Florian Penzkofer
92b3518c78 fichier: support downloading password protected files and folders 2021-06-10 19:00:26 +02:00
Ivan Andreev
062919e08c deprecate cache backend (#5382) 2021-06-10 19:52:55 +03:00
darrenrhs
654f5309b0 docs: drive: include requirement to publish app in step-by-step - fixes #5393 2021-06-10 17:00:52 +01:00
albertony
318fa4472b docs: fix incorrect syntax in config update example 2021-06-10 08:59:18 +02:00
Reid Buzby
5104e24153 docs: fix incorrect token type for yandex
https://forum.rclone.org/t/yandex-documentation/24445/2
2021-06-09 13:04:55 +02:00
albertony
9d87a5192d docs: fix code section formatting in filtering docs
Fixes #5387
2021-06-08 18:53:18 +02:00
Ivan Andreev
29f967dba3 make commanddocs for v1.56 (#5383) 2021-06-08 18:57:04 +03:00
Chris Lu
1f846c18d4 s3: Add SeaweedFS 2021-06-08 09:59:57 +01:00
albertony
41f561bf26 jottacloud: fix invalid url in output from link command
Fixes #5370
2021-05-31 10:40:21 +02:00
database64128
df60e6323c 🧹 GCS: Clean up time format constants 2021-05-28 14:44:50 +01:00
database64128
58006a925a 📑 GCS: Update docs on mtime
- Mention the new modification time behavior and the modify window issue.
- Unify markdown format.
- ref rclone/rclone#5331
2021-05-28 14:44:50 +01:00
database64128
ee2fac1855 🕰️ GCS: Compatible with gsutil's mtime metadata
- Write `goog-reserved-file-mtime` in addition to `mtime`.
- Fallback to `goog-reserved-file-mtime` if `mtime` doesn't exist.
- ref rclone/rclone#5331
2021-05-28 14:44:50 +01:00
Tyson Moore
2188fe38e5 docs: add caveat about DSCP on Windows 2021-05-28 13:43:38 +01:00
Tyson Moore
b5f8f0973b fshttp: implement graceful DSCP error handling 2021-05-28 13:43:38 +01:00
Tyson Moore
85b8ba9469 fshttp: rework address parsing for DSCP (fixes #5293) 2021-05-28 13:43:38 +01:00
Tom
04a1f673f0 serve sftp: add --stdio flag to serve via stdio - fixes #5311 2021-05-28 13:40:32 +01:00
albertony
0574ebf44a vfs: do not print notice about missing poll-interval support when set to 0
Fixes #5359
2021-05-28 13:09:15 +02:00
albertony
22e86ce335 vfs: fix that umask option cannot be set as environment variable (#5351)
Fixes #5350
2021-05-22 20:48:02 +02:00
acsfer
c9fce20249 tardigrade: add warning about too many open files - Fixes #5310 2021-05-21 20:04:57 +01:00
Ivan Andreev
5b6f637461 fs/hash: align hashsum names and update documentation (#5339)
- Unify all hash names as lowercase alphanumerics without punctuation.
- Legacy names continue to work but disappear from the docs; they can be deprecated or dropped later.
- Make rclone hashsum print supported hash list in case of wrong spelling.
- Update documentation.

Fixes #5071
Fixes #4841
2021-05-21 17:32:33 +03:00
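For example, with the unified lowercase names (the remote name is a placeholder):

    # With no hash name, hashsum prints the list of supported hashes.
    rclone hashsum
    # Lowercase names such as sha1, md5 and quickxor are now the canonical spelling.
    rclone hashsum sha1 remote:path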
albertony
07f2f3a62e docs: fix link to paths on windows section 2021-05-19 22:11:17 +02:00
albertony
6dc190ec93 docs: mention that network/unc paths are supported in local filesystem on windows 2021-05-19 22:11:17 +02:00
Nick Craig-Wood
71f75a1d95 operations: fix tests work on compress by supplying incompressible data 2021-05-18 17:38:32 +01:00
Nick Craig-Wood
1b44035e45 filefabric: fix listing after change of from field from "int" to int. 2021-05-18 17:11:16 +01:00
Nick Craig-Wood
054b467f32 check: log the hash in use like cryptcheck does
See: https://forum.rclone.org/t/does-a-rclone-check-on-similar-remotes-still-compute-hashes/24288/15
2021-05-18 16:21:19 +01:00
Ivan Andreev
23da913d03 dbhashsum: drop command deprecated a year ago - #4837 (#5336)
dbhashsum was deprecated in rclone 1.52 on 2020-05-27;
this patch drops the command completely as of rclone 1.56
2021-05-18 12:27:17 +03:00
Nick Craig-Wood
c0cda087a8 s3: don't check to see if remote is object if it ends with /
Before this change, rclone would always check the root to see if it
was an object.

This change doesn't check to see if the root is an object if the path
ends with a /

This avoids a transaction where rclone HEADs the path to see if it
exists.

See #4990
2021-05-17 16:43:34 +01:00
Nick Craig-Wood
1773717a47 fs/march: improve errors when root source/destination doesn't exist
See: https://forum.rclone.org/t/rclone-attempts-to-read-files-in-the-destination-directory-when-the-source-doesnt-exist/23412
2021-05-17 16:38:03 +01:00
Nick Craig-Wood
04308dcaa1 local: add --local-unicode-normalization (and remove --local-no-unicode-normalization)
macOS stores files in NFD form and transferring them like this to some
systems causes the Korean language to display incorrectly.

This adds the flag --local-unicode-normalization to optionally
normalize the file names to NFC.

This also removes the (long deprecated) --local-no-unicode-normalization flag

See: https://forum.rclone.org/t/support-for-korean-jaso-conversion/19435
2021-05-17 16:34:25 +01:00
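A minimal sketch of the new flag; source and destination paths are placeholders:

    # Normalize file names to NFC while copying from a macOS (NFD) filesystem.
    rclone copy /Users/me/Documents remote:docs --local-unicode-normalization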
Nick Craig-Wood
06f27384dd b2: fix versions and .files with no extension - fixes #5244 2021-05-17 16:20:29 +01:00
Nick Craig-Wood
82f1f7d2c4 config: expand docs on config protocol #3455 2021-05-17 12:10:58 +01:00
Nick Craig-Wood
6555d3eb33 onedrive: fix failed to configure: empty token found error #3455
This bug was caused as part of the config rework
2021-05-17 12:10:58 +01:00
Nick Craig-Wood
03229cf394 bin/config.py: add --rc flag for testing to an rclone rcd #3455 2021-05-17 12:10:58 +01:00
Nick Craig-Wood
f572bf7829 Add sp31415t1 to contributors 2021-05-17 12:10:58 +01:00
sp31415t1
f593558dc2 docs: improve --disable help 2021-05-14 15:44:58 +01:00
Ivan Andreev
08040a57b0 dropbox: improve "own App IP" instructions (#5325)
Instructions in https://rclone.org/dropbox/#get-your-own-dropbox-app-id
are a little incomplete. I had to guess a few extra details to make things work.
This patch adds missing parts.

Fixes #5242
2021-05-14 17:42:30 +03:00
Alexey Ivanov
2fa7a3c0fb dropbox: simplify chunked uploads
Signed-off-by: Alexey Ivanov <rbtz@dropbox.com>
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
798d1293df Add Alexey Ivanov to contributors 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
75c417ad93 dropbox: fix async batch missing the last few entries 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
5ee646f264 dropbox: make batcher retry all errors so it doesn't exit early
See: https://forum.rclone.org/t/dropbox-too-many-requests-or-write-operations-trying-again-in-15-seconds/23316/18
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
4a4aca4da7 dropbox: fix deadlock in batch Commit 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
2e4b65f888 dropbox: add --dropbox-batch-mode flag to speed up uploading #5156
This adds 3 upload modes for dropbox off, sync and async and makes
sync the default.

This should improve uploads (especially for small files) greatly.
2021-05-14 14:07:44 +01:00
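A hedged usage sketch, assuming a Dropbox remote named `dropbox:` and a local path full of small files:

    # Batch metadata commits asynchronously to speed up many small uploads.
    rclone copy /data/many-small-files dropbox:backup --dropbox-batch-mode async --transfers 32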
Nick Craig-Wood
77cda6773c config: tidy code to use UpdateRemote/CreateRemote instead of editOptions #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
dbc5167281 bin: add config.py as an example of how to use the state based config #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
635d1e10ae config create: add --state and --result parameters #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
296ceadda6 fs: add --all to rclone config create/update to ask all the config questions #3455
This also factors the config questions into a state based mechanism so
a backend can be configured using the same dialog as rclone config but
remotely.
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
7ae2891252 fs: Add Exclusive parameter to Option to choose Examples only #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
99caf79ffe config: allow config create and friends to take key=value parameters #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
095cf9e4be config create: add --non-interactive and --continue parameters #3455
This adds a mechanism to add external interfaces to rclone using the
state based configuration.
2021-05-14 14:07:44 +01:00
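A sketch of a non-interactive create using key=value parameters; the remote name and option values are placeholders:

    # Creates the remote without prompting; rclone prints a JSON state blob
    # describing any question it would otherwise have asked.
    rclone config create mys3 s3 provider=AWS env_auth=true --non-interactive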
buengese
e57553930f jottacloud: fix legacy auth with state based config system
...also some minor cleanup
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
f122808d86 fs: add names to each config parameter so we can override them #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
94dbfa4ea6 fs: change Config callback into state based callback #3455
This is a very large change which turns the post Config function in
backends into a state based call and response system so that
alternative user interfaces can be added.

The existing config logic has been converted, but it is quite
complicated and follow-up commits will likely be needed to fix it!

Follow up commits will add a command line and API based way of using
this configuration system.
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
6f2e525821 Add Antoon Prins to contributors 2021-05-14 14:07:44 +01:00
Ivan Andreev
119bddc10b selfupdate: fix archive name on macos 2021-05-13 22:35:39 +03:00
albertony
28e9fd45cc vfs: avoid unnecessary subdir in cache path
Fixes #5316
2021-05-13 11:16:42 +02:00
Antoon Prins
326f3b35ff webdav: add headers option 2021-05-12 09:52:07 +01:00
albertony
ce83228cb2 sftp: expand tilde and environment variables in configured known_hosts_file (#5322)
Fixes #5220
2021-05-11 19:58:26 +02:00
Chris Macklin
732bc08ced config: replace defaultConfig with a thread-safe in-memory implementation 2021-05-07 16:04:09 +01:00
Nick Craig-Wood
6ef7178ee4 local: always use readlink to read symlink size
It was discovered that on some Android systems the stat size of a symlink
is different to the size that readlink returns.

This was giving errors like this

    transport connection broken: http: ContentLength=30 with Body length 28

There are enough cases where the readlink size differs from the stat size
that this patch now always does readlink to work out
the size of a symlink.

Since symlinks are relatively uncommon this shouldn't affect
performance too much and will mean that the size is always correct.

This deprecates the --local-zero-size-links flag which is now
effectively always enabled.

See: https://forum.rclone.org/t/problem-with-symlinks-and-links/23840/
2021-05-04 08:53:09 +01:00
Nick Craig-Wood
9ff6f48d74 Remove accidentally committed *.orig and *.rej files and ignore 2021-05-03 07:58:29 +01:00
Nick Craig-Wood
532af77fd1 Add Chris Macklin to contributors 2021-05-03 07:58:29 +01:00
Nolan Woods
ab7dfe0c87 http: clean up Bind to better use middleware 2021-05-02 11:31:01 +01:00
Nolan Woods
e489a101f6 lib/http: add default 404 handler 2021-05-02 11:30:02 +01:00
Chris Macklin
35a86193b7 accounting: deglobalize startTime/elapsedTime - fixes #5282 2021-05-01 14:51:21 +01:00
x0b
2833941da8 build: add gomobile android build 2021-04-30 20:39:04 +01:00
Nick Craig-Wood
9e6c23d9af fs: add --disable-http2 for global http2 disable #5253 2021-04-30 20:31:04 +01:00
Nick Craig-Wood
8bef972262 Add Gautam Kumar to contributors 2021-04-30 20:31:04 +01:00
Nick Craig-Wood
0a968818f6 Add Nolan Woods to contributors 2021-04-30 20:31:04 +01:00
Nick Craig-Wood
c2ac353183 Add lewisxy to contributors 2021-04-30 20:31:04 +01:00
Nick Craig-Wood
773da395fb Add Tatsuya Noyori to contributors 2021-04-30 20:31:04 +01:00
Gautam Kumar
9e8cd6bff9 docs: fixed some typos 2021-04-28 22:55:27 +01:00
Nolan Woods
5d2e327b6f http: Replace httplib with lib/http 2021-04-28 22:54:15 +01:00
Nolan Woods
77221d7528 httplib: Deprecate package 2021-04-28 22:54:15 +01:00
Nolan Woods
1971c1ef87 httplib: Move httplib/serve/data to ../serve/http/data 2021-04-28 22:54:15 +01:00
Nolan Woods
7e7dbe16c2 httplib: Add --template config and flags to serve/data 2021-04-28 22:54:15 +01:00
Nolan Woods
002d323c94 lib/http: Move HTTP object serialization logic to lib/http 2021-04-28 22:54:15 +01:00
Nolan Woods
4ad62ec016 lib/http: Add authentication middleware with basic auth implementation 2021-04-28 22:54:15 +01:00
Nolan Woods
95ee14bb2c feat: Add lib/http
lib/http provides an abstraction for a central http server that services can bind routes to
2021-04-28 22:54:15 +01:00
Romeo Kienzler
88aabd1f71 docs: corrected spelling
from "Check the integrity of an encrypted remote." to "Check the integrity of a crypted remote."
2021-04-28 22:50:55 +01:00
Nick Craig-Wood
34627c5c7e librclone: update docs for merge #4891 2021-04-28 20:42:00 +01:00
Nick Craig-Wood
e33303df94 librclone: add basic Python bindings with tests #4891 2021-04-28 16:55:08 +01:00
Nick Craig-Wood
665eceaec3 librclone: catch panics at the language change boundary #4891 2021-04-28 16:55:08 +01:00
Nick Craig-Wood
ba09ee18bb librclone: factor into gomobile and internal implementation #4891
This was needed because gomobile can't use a main package whereas one
is required to make a normal shared C library.
2021-04-28 16:55:08 +01:00
Nick Craig-Wood
62bf63d36f librclone: add tests for build and execute them in the actions #4891 2021-04-28 16:55:08 +01:00
Nick Craig-Wood
f38c262471 librclone: change interface for C code and add Mobile interface #4891
This changes the interface for the C code to return a struct on the
stack that is defined in the code rather than one which is defined by
the cgo compiler. This is more future-proof and in line with the
gomobile interface.

This also adds a gomobile interface RcloneMobileRPC which uses generic
go types conforming to the gobind restrictions.

It also fixes up initialisation errors.
2021-04-28 16:55:08 +01:00
Nick Craig-Wood
5db88fed2b librclone: exports, errors, docs and examples #4891
- rename C exports to be namespaced with Rclone prefix
- fix error handling in RcloneRPC
- add more examples
- add more docs
- add README
- simplify ctest Makefile
2021-04-28 16:55:08 +01:00
lewisxy
316e65589b librclone: export the rclone RC as a C library #4891 2021-04-28 16:55:08 +01:00
Tatsuya Noyori
4401d180aa s3: add --s3-no-head-object
This stops rclone doing any HEAD requests on objects.
2021-04-28 11:05:54 +01:00
Nick Craig-Wood
9ccd870267 Move the how to use GitHub info in the bug/issue templates to the end
This is so that we see the text of the bug/issue first rather than the
"how to use GitHub" info, which is very useful when posting bug reports
to the forum or social media.
2021-04-28 09:40:19 +01:00
Nick Craig-Wood
16d1da2c1e vfs: remove item.metaDirty as it was confusing and not used
See discussion in #5277
2021-04-28 09:33:22 +01:00
Nick Craig-Wood
00a0ee1899 vfs: fix modtime changing when reading file into cache - fixes #5277
Before this change but after:

aea8776a43 vfs: fix modtimes not updating when writing via cache #4763

When a file was opened read-only the modtime was read from the cached
file. However this modtime wasn't correct leading to an incorrect
result.

This change fixes the definition of `item.IsDirty` to be true only
when the data is dirty. This fixes the problem as a read only file
isn't considered dirty.
2021-04-28 09:33:22 +01:00
Nick Craig-Wood
b78c9a65fa backends: remove log.Fatal and replace with error returns #5234
This changes the Config interface so that it returns an error.
2021-04-27 18:18:08 +01:00
Nick Craig-Wood
ef3c350686 box: return errors instead of calling log.Fatal with them #5234 2021-04-27 18:18:08 +01:00
Nick Craig-Wood
742af80972 Add jtagcat to contributors 2021-04-27 18:18:08 +01:00
albertony
08a2df51be Use decimal prefixes for counts
Fixes #5126
2021-04-27 02:25:52 +03:00
albertony
2925e1384c Use binary prefixes for size and rate units
Includes adding support for the additional size input suffixes Mi and MiB, treated as equivalent to M.
Extends binary suffix output with letter i, e.g. Ki and Mi.
Centralizes creation of bit/byte unit strings.
2021-04-27 02:25:52 +03:00
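For instance, after this change the following spellings are treated as the same limit (a sketch; the remotes are placeholders):

    rclone copy src: dst: --bwlimit 10M      # M is a binary prefix, i.e. 10 MiB/s
    rclone copy src: dst: --bwlimit 10Mi     # equivalent
    rclone copy src: dst: --bwlimit 10MiB    # equivalent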
albertony
2ec0c8d45f stats: correct spelling of data rate units 2021-04-27 02:25:52 +03:00
albertony
98579608ec docs: cleanup spelling of size and rate units 2021-04-27 02:25:52 +03:00
Caleb Case
a1a41aa0c1 backend/tardigrade: use negative offset
v1.4.6 of uplink allows us to do a negative offset from the end of the
file. This removes a round trip when requesting the last N bytes of a
file.

Previous to v1.4.6 of uplink it wasn't possible to do a negative offset
on download. This meant that to fulfill the semantics of http range
headers it was necessary to first fetch the size of the object via a
stat call and compute absolute offset and length.
2021-04-27 02:20:08 +03:00
albertony
f8d56bebaf config: delay load config file (#5258)
Restructuring of the config code in v1.55 resulted in the config
file being loaded early at process startup. If the configuration
file is encrypted this means the user will need to supply the password,
even when running commands that do not use the config.
This also led to an issue where mount with --daemon failed to
decrypt the config file when it had to prompt the user for a password.

Fixes #5236
Fixes #5228
2021-04-26 23:37:49 +02:00
jtagcat
5d799431a7 GitHub issue templates: Add GH Etiquette. 2021-04-26 18:12:37 +01:00
Leo Luan
8f23cae1c0 vfs: Add cache reset for --vfs-cache-max-size handling at cache poll interval
The vfs-cache-max-size parameter is probably confusing to many users.
The cache cleaner checks the cache size periodically, at the --vfs-cache-poll-interval
(default 60 seconds) interval, and removes cache items in the following order.

(1) cache items that are not in use and with age > vfs-cache-max-age
(2) if the cache space used at this time still is larger than
vfs-cache-max-size, the cleaner continues to remove cache items that are
not in use.

The cache cleaning process does not remove cache items that are currently in use.
If the total space consumed by in-use cache items exceeds vfs-cache-max-size, the
periodical cache cleaner thread does not do anything further and leaves the in-use
cache items alone with a total space larger than vfs-cache-max-size.

A cache reset feature was introduced in 1.53 which resets in-use (but not dirty,
i.e., not being updated) cache items when additional cache data incurs an ENOSPC
error.  But this code was not activated in the periodical cache cleaning thread.

This patch adds the cache reset step in the cache cleaner thread during cache
poll to reset cache items until the total size of the remaining cache items is
below vfs-cache-max-size.
2021-04-26 17:55:52 +01:00
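A hedged mount sketch showing the options involved; the remote and mountpoint are placeholders:

    # The cache cleaner now also resets in-use (but not dirty) items when the
    # total cache size stays above --vfs-cache-max-size at each poll interval.
    rclone mount remote:media /mnt/media \
        --vfs-cache-mode full \
        --vfs-cache-max-size 10G \
        --vfs-cache-max-age 24h \
        --vfs-cache-poll-interval 1m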
Mathieu Carbou
964088affa build: Only run event-based workflow scripts under rclone repo with manual override
This updates the actions to run event-based workflow scripts
only under the rclone repository and not forks. It also adds the
ability to manually trigger a build from a branch in rclone repository
and forks.

Fixes #5272
2021-04-26 17:52:03 +01:00
1404 changed files with 201712 additions and 73238 deletions

View File

@@ -9,7 +9,7 @@ We understand you are having a problem with rclone; we want to help you with tha
**STOP and READ**
**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
Please show the effort you've put in to solving the problem and please be specific.
Please show the effort you've put into solving the problem and please be specific.
People are volunteering their time to help! Low effort posts are not likely to get good answers!
If you think you might have found a bug, try to replicate it with the latest beta (or stable).
@@ -37,7 +37,6 @@ The Rclone Developers
-->
#### The associated forum post URL from `https://forum.rclone.org`
@@ -65,3 +64,11 @@ The Rclone Developers
#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
<!--- Please keep the note below for others who read your bug report. -->
#### How to use GitHub
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.

View File

@@ -26,7 +26,6 @@ The Rclone Developers
-->
#### The associated forum post URL from `https://forum.rclone.org`
@@ -42,3 +41,11 @@ The Rclone Developers
#### How do you think rclone should be changed to solve that?
<!--- Please keep the note below for others who read your feature request. -->
#### How to use GitHub
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.

View File

@@ -22,7 +22,7 @@ Link issues and relevant forum posts here.
#### Checklist
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-new-feature-or-bug-fix).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).

.github/dependabot.yml (new file, 10 additions)
View File

@@ -0,0 +1,10 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "daily"

View File

@@ -12,29 +12,45 @@ on:
tags:
- '*'
pull_request:
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
required: true
default: true
jobs:
build:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
include:
- job_name: linux
os: ubuntu-latest
go: '1.16.x'
go: '1.20'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
racequicktest: true
librclonetest: true
deploy: true
- job_name: linux_386
os: ubuntu-latest
go: '1.20'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macOS-latest
go: '1.16.x'
os: macos-11
go: '1.20'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -42,54 +58,38 @@ jobs:
deploy: true
- job_name: mac_arm64
os: macOS-latest
go: '1.16.x'
os: macos-11
go: '1.20'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows_amd64
- job_name: windows
os: windows-latest
go: '1.16.x'
go: '1.20'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.16.x'
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
cgo: '0'
build_flags: '-include "^windows/"'
build_args: '-buildmode exe'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.16.x'
go: '1.20'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.13
- job_name: go1.18
os: ubuntu-latest
go: '1.13.x'
quicktest: true
- job_name: go1.14
os: ubuntu-latest
go: '1.14.x'
go: '1.18'
quicktest: true
racequicktest: true
- job_name: go1.15
- job_name: go1.19
os: ubuntu-latest
go: '1.15.x'
go: '1.19'
quicktest: true
racequicktest: true
@@ -99,15 +99,15 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
stable: 'false'
go-version: ${{ matrix.go }}
check-latest: true
- name: Set environment variables
shell: bash
@@ -124,7 +124,7 @@ jobs:
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse libfuse-dev rpm pkg-config
sudo apt-get install fuse3 libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
@@ -132,7 +132,7 @@ jobs:
run: |
brew update
brew install --cask macfuse
if: matrix.os == 'macOS-latest'
if: matrix.os == 'macos-11'
- name: Install Libraries on Windows
shell: powershell
@@ -163,7 +163,7 @@ jobs:
env
- name: Go module cache
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -175,6 +175,11 @@ jobs:
run: |
make
- name: Rclone version
shell: bash
run: |
rclone version
- name: Run tests
shell: bash
run: |
@@ -187,12 +192,13 @@ jobs:
make racequicktest
if: matrix.racequicktest
- name: Code quality test
- name: Run librclone tests
shell: bash
run: |
make build_dep
make check
if: matrix.check
make -C librclone/ctest test
make -C librclone/ctest clean
librclone/python/test_rclone.py
if: matrix.librclonetest
- name: Compile all architectures test
shell: bash
@@ -213,100 +219,137 @@ jobs:
# Deploy binaries if enabled in config && not a PR && not a fork
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Code quality test
uses: golangci/golangci-lint-action@v3
with:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
uses: actions/setup-go@v3
with:
go-version: '1.20'
check-latest: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: Scan for vulnerabilities
run: govulncheck ./...
android:
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
# Upgrade together with NDK version
- name: Set up Go 1.14
uses: actions/setup-go@v1
with:
go-version: 1.14
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.20'
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Go module cache
uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Set global environment variables
shell: bash
run: |
echo "VERSION=$(make version)" >> $GITHUB_ENV
- name: Set global environment variables
shell: bash
run: |
echo "VERSION=$(make version)" >> $GITHUB_ENV
- name: build native rclone
run: |
make
- name: build native rclone
run: |
make
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
- name: install gomobile
run: |
go install golang.org/x/mobile/cmd/gobind@latest
go install golang.org/x/mobile/cmd/gomobile@latest
env PATH=$PATH:~/go/bin gomobile init
echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a gomobile build
run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: Upload artifacts
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
- name: Upload artifacts
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'


@@ -7,11 +7,12 @@ on:
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and publish image
@@ -19,7 +20,7 @@ jobs:
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}


@@ -6,11 +6,12 @@ on:
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Get actual patch version
@@ -27,7 +28,32 @@ jobs:
with:
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
build_docker_volume_plugin:
if: github.repository == 'rclone/rclone'
needs: build
runs-on: ubuntu-latest
name: Build docker plugin job
steps:
- name: Checkout master
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and publish docker plugin
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

.github/workflows/winget.yml (new file)

@@ -0,0 +1,14 @@
name: Publish to Winget
on:
release:
types: [released]
jobs:
publish:
runs-on: windows-latest # Action can only run on Windows
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:
identifier: Rclone.Rclone
installers-regex: '-windows-\w+\.zip$'
token: ${{ secrets.WINGET_TOKEN }}

.gitignore

@@ -11,3 +11,7 @@ rclone.iml
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
Thumbs.db
__pycache__


@@ -5,7 +5,7 @@ linters:
- deadcode
- errcheck
- goimports
- golint
- revive
- ineffassign
- structcheck
- varcheck
@@ -20,7 +20,11 @@ issues:
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-per-linter: 0
max-issues-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m


@@ -12,95 +12,164 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):
* Rclone version (e.g. output from `rclone -V`)
* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
* Rclone version (e.g. output from `rclone version`)
* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
* if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a pull request ##
## Submitting a new feature or bug fix ##
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.
If it is a big feature then make an issue first so it can be discussed.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.
First in your web browser press the fork button on [rclone's GitHub
To prepare your pull request first press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).
Now in your terminal
Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
git clone https://github.com/rclone/rclone.git
cd rclone
git remote rename origin upstream
# if you have SSH keys setup in your GitHub account:
git remote add origin git@github.com:YOURUSER/rclone.git
go build
# otherwise:
git remote add origin https://github.com/YOURUSER/rclone.git
Make a branch to add your new feature
Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
Now [install Go](https://golang.org/doc/install) and verify your installation:
go version
Great, you can now compile and execute your own version of rclone:
go build
./rclone version
(Note that you can also replace `go build` with `make`, which will include a
more accurate version number in the executable as well as enable you to specify
more build options.) Finally make a branch to add your new feature
git checkout -b my-new-feature
And get hacking.
When ready - run the unit tests for the code you changed
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
When ready - test the affected functionality and run the unit tests for the code you changed
cd folder/with/changed/files
go test -v
Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests.
Note the top level Makefile targets
* make check
* make test
Both of these will be run by Travis when you make a pull request but
you can do this yourself locally too. These require some extra go
packages which you can install with
* make build_dep
This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
Make sure you
* Add [unit tests](#testing) for a new feature.
* Add [documentation](#writing-documentation) for a new feature.
* Follow the [commit message guidelines](#commit-messages).
* Add [unit tests](#testing) for a new feature
* squash commits down to one per feature
* rebase to master with `git rebase master`
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
When you are done with that
When you are done with that push your changes to GitHub:
git push -u origin my-new-feature
Go to the GitHub website and click [Create pull
and open the GitHub website to [create your pull
request](https://help.github.com/articles/creating-a-pull-request/).
You patch will get reviewed and you might get asked to fix some stuff.
Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
```
git log # See how many commits you want to squash
git reset --soft HEAD~2 # This squashes the 2 latest commits together.
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
git commit # Add a new commit message.
git push --force # Push the squashed commit to your GitHub repo.
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
```
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
## CI for your fork ##
## Using Git and GitHub ##
### Committing your changes ###
Follow the guideline for [commit messages](#commit-messages) and then:
git checkout my-new-feature # To switch to your branch
git status # To see the new and changed files
git add FILENAME # To select FILENAME for the commit
git status # To verify the changes to be committed
git commit # To do the commit
git log # To verify the commit. Use q to quit the log
You can modify the message or changes in the latest commit using:
git commit --amend
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Replacing your previously pushed commits ###
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
Your previously pushed commits are replaced by:
git push --force origin my-new-feature
### Basing your changes on the latest master ###
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
git checkout master
git fetch upstream
git merge --ff-only
git push origin --follow-tags # optional update of your fork in GitHub
git checkout my-new-feature
git rebase master
If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Squashing your commits ###
To combine your commits into one commit:
git log # To count the commits to squash, e.g. the last 2
git reset --soft HEAD~2 # To undo the 2 latest commits
git status # To check everything is as expected
If everything is fine, then make the new combined commit:
git commit # To commit the undone commits as one
otherwise, you may roll back using:
git reflog # To check that HEAD{1} is your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
### GitHub Continuous Integration ###
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
## Testing ##
### Quick testing ###
rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
go test -v ./...
You can also use `make`, if supported by your platform
make quicktest
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
### Backend testing ###
rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can
@@ -134,12 +203,19 @@ project root:
go install github.com/rclone/rclone/fstest/test_all
test_all -backend drive
### Full integration testing ###
If you want to run all the integration tests against all the remotes,
then change into the project root and run
make check
make test
This command is run daily on the integration test server. You can
The commands may require some extra go packages which you can install with
make build_dep
The full integration tests are run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/
## Code Organisation ##
@@ -147,16 +223,17 @@ find the results at https://pub.rclone.org/integration-tests/
Rclone code is organised into a small number of top level directories
with modules beneath.
* backend - the rclone backends for interfacing to cloud providers -
* backend - the rclone backends for interfacing to cloud providers -
* all - import this to load all the cloud providers
* ...providers
* bin - scripts for use while building or maintaining rclone
* cmd - the rclone commands
* all - import this to load all the commands
* ...commands
* cmdtest - end-to-end tests of commands, flags, environment variables,...
* docs - the documentation and website
* content - adjust these docs only - everything else is autogenerated
* command - these are auto generated - edit the corresponding .go file
* command - these are auto-generated - edit the corresponding .go file
* fs - main rclone definitions - minimal amount of code
* accounting - bandwidth limiting and statistics
* asyncreader - an io.Reader which reads ahead
@@ -198,18 +275,39 @@ If you add a new general flag (not for a backend), then document it in
alphabetical order.
If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field. The first line of this is used
for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.
the source file in the `Help:` field.
* Start with the most important information about the option,
as a single sentence on a single line.
* This text will be used for the command-line flag help.
* It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence.
* It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help.
* Try to keep it below 80 characters, to reduce text wrapping in the terminal.
* More details can be added in a new paragraph, after an empty line (`"\n\n"`).
* Like with docs generated from Markdown, a single line break is ignored
and two line breaks creates a new paragraph.
* This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release).
* To create options of enumeration type use the `Examples:` field.
* Each example value has its own `Help:` field, but these are treated
a bit differently from the main option help text. They will be shown
as an unordered list, so a single line break is enough to
create a new list item. Also, for enumeration texts such as names of
countries, it looks better without an ending period/full stop character
(see the sketch below).
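As a minimal, purely illustrative sketch of the above (the option names, help texts and example values are invented and not taken from any real backend):
```go
package example

import "github.com/rclone/rclone/fs"

// Illustrative only: the first Help line is one short sentence ending in a
// full stop, extra detail follows after a blank line, and an enumerated
// option uses Examples with short, unpunctuated help texts.
var exampleOptions = []fs.Option{{
    Name: "upload_cutoff",
    Help: `Cutoff for switching to chunked upload.

Files larger than this are uploaded in chunks. Keeping the first
line short reduces wrapping in the generated flag help.`,
    Advanced: true,
}, {
    Name: "region",
    Help: "Region to connect to.",
    Examples: []fs.OptionExample{{
        Value: "eu-west-1",
        Help:  "Western Europe",
    }, {
        Value: "us-east-1",
        Help:  "Eastern United States",
    }},
}}
```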
The only documentation you need to edit are the `docs/content/*.md`
files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
Documentation for rclone sub commands is with their code, e.g.
`cmd/ls/ls.go`.
`cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
line, without a period/full stop character at the end, as it will be
combined unmodified with other information (such as any default value).
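For example, a hedged sketch using the spf13/cobra API that rclone's commands are built on (the command and flag below are invented for illustration and are not rclone's actual helpers):
```go
package lsx

import "github.com/spf13/cobra"

var recurse bool

// commandDefinition is an invented example command.
var commandDefinition = &cobra.Command{
    Use:   "lsx remote:path",
    Short: "List the objects in the path (illustrative command)",
}

func init() {
    // The usage string is a single sentence with no trailing full stop,
    // because it is combined unmodified with other information such as
    // any default value in the --help output.
    commandDefinition.Flags().BoolVarP(&recurse, "recursive", "R", false,
        "Recurse into the listing")
}
```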
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
@@ -252,7 +350,7 @@ And here is an example of a longer one:
```
mount: fix hang on errored upload
In certain circumstances if an upload failed then the mount could hang
In certain circumstances, if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed. This will cause the write side to return a pipe closed
error fixing the hang.
@@ -284,7 +382,7 @@ and `go.sum` in the same commit as your other changes.
If you need to update a dependency then run
GO111MODULE=on go get -u github.com/pkg/errors
GO111MODULE=on go get -u golang.org/x/crypto
Check in a single commit as above.
@@ -327,8 +425,8 @@ Research
Getting going
* Create `backend/remote/remote.go` (copy this from a similar remote)
* box is a good one to start from if you have a directory based remote
* b2 is a good one to start from if you have a bucket based remote
* box is a good one to start from if you have a directory-based remote
* b2 is a good one to start from if you have a bucket-based remote
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
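As a rough, purely illustrative skeleton of those first steps, assuming a placeholder backend called `remote` (the registration pattern mirrors the existing backends shown elsewhere in this diff; every name and option here is made up):
```go
// Package remote is a placeholder skeleton for a new backend.
package remote

import (
    "context"
    "errors"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
)

func init() {
    fs.Register(&fs.RegInfo{
        Name:        "remote",
        Description: "My Example Provider",
        NewFs:       NewFs,
        Options: []fs.Option{{
            Name:     "endpoint",
            Help:     "Endpoint for the service.\n\nLeave blank normally.",
            Advanced: true,
        }},
    })
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse the options, create an HTTP client, probe the root, etc.
    return nil, errors.New("remote: not implemented yet")
}
```
From there, flesh out the `Fs` and `Object` types and add the new package to the imports in `backend/all/all.go` as described above.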


@@ -15,11 +15,11 @@ Current active maintainers of rclone are:
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | tardigrade backend |
| Caleb Case | @calebcase | storj backend |
**This is a work-in-progress draft.**
This is a guide for how to be an rclone maintainer. This is mostly a writeup of what I (@ncw) attempt to do.
This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.
## Triaging Tickets ##
@@ -27,15 +27,15 @@ When a ticket comes in it should be triaged. This means it should be classified
Rclone uses the labels like this:
* `bug` - a definite verified bug
* `bug` - a definitely verified bug
* `can't reproduce` - a problem which we can't reproduce
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with `rclone mount` command
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
* `help` wanted - mark these if you find a self-contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation, etc.
* `Needs Go 1.XX` - waiting for that version of Go to be released
@@ -51,7 +51,7 @@ The milestones have these meanings:
* v1.XX - stuff we would like to fit into this release
* v1.XX+1 - stuff we are leaving until the next release
* Soon - stuff we think is a good idea - waiting to be scheduled to a release
* Soon - stuff we think is a good idea - waiting to be scheduled for a release
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
@@ -65,7 +65,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po
Try to process pull requests promptly!
Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
@@ -81,15 +81,15 @@ Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
High impact regressions should be fixed before the next release.
Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.
Near the start of the release cycle, the dependencies should be updated with `make update` to give time for bugs to surface.
Towards the end of the release cycle try not to merge anything too big so let things settle down.
Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time-consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
## Mailing list ##
There is now an invite only mailing list for rclone developers `rclone-dev` on google groups.
There is now an invite-only mailing list for rclone developers `rclone-dev` on google groups.
## TODO ##

MANUAL.html (generated): diff suppressed because it is too large
MANUAL.md (generated): diff suppressed because it is too large
MANUAL.txt (generated): diff suppressed because it is too large


@@ -81,6 +81,9 @@ quicktest:
racequicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
compiletest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
# Do source code quality checks
check: rclone
@echo "-- START CODE QUALITY REPORT -------------------------------"
@@ -97,17 +100,21 @@ release_dep_linux:
# Get the release dependencies we only install on Windows
release_dep_windows:
GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
# Update dependencies
showupdates:
@echo "*** Direct dependencies that could be updated ***"
@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
# Update direct dependencies only
updatedirect:
GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
GO111MODULE=on go mod tidy
# Update direct and indirect dependencies and test dependencies
update:
GO111MODULE=on go get -u -t ./...
-#GO111MODULE=on go get -d $(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
GO111MODULE=on go get -d -u -t ./...
GO111MODULE=on go mod tidy
# Tidy the module dependencies
@@ -241,18 +248,48 @@ retag:
startdev:
@echo "Version is $(VERSION)"
@echo "Next version is $(NEXT_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_VERSION)\"\n" | gofmt > fs/versiontag.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_PATCH_VERSION)\"\n" | gofmt > fs/versiontag.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
winzip:
zip -9 rclone-$(TAG).zip rclone.exe
# docker volume plugin
PLUGIN_USER ?= rclone
PLUGIN_TAG ?= latest
PLUGIN_BASE_TAG ?= latest
PLUGIN_ARCH ?= amd64
PLUGIN_IMAGE := $(PLUGIN_USER)/docker-volume-rclone:$(PLUGIN_TAG)
PLUGIN_BASE := $(PLUGIN_USER)/rclone:$(PLUGIN_BASE_TAG)
PLUGIN_BUILD_DIR := ./build/docker-plugin
PLUGIN_CONTRIB_DIR := ./contrib/docker-plugin/managed
docker-plugin-create:
docker buildx inspect |grep -q /${PLUGIN_ARCH} || \
docker run --rm --privileged tonistiigi/binfmt --install all
rm -rf ${PLUGIN_BUILD_DIR}
docker buildx build \
--no-cache --pull \
--build-arg BASE_IMAGE=${PLUGIN_BASE} \
--platform linux/${PLUGIN_ARCH} \
--output ${PLUGIN_BUILD_DIR}/rootfs \
${PLUGIN_CONTRIB_DIR}
cp ${PLUGIN_CONTRIB_DIR}/config.json ${PLUGIN_BUILD_DIR}
docker plugin rm --force ${PLUGIN_IMAGE} 2>/dev/null || true
docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}
docker-plugin-push:
docker plugin push ${PLUGIN_IMAGE}
docker plugin rm ${PLUGIN_IMAGE}
docker-plugin: docker-plugin-create docker-plugin-push


@@ -1,8 +1,9 @@
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Download](https://rclone.org/downloads/) |
[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
@@ -10,38 +11,46 @@
[![Build Status](https://github.com/rclone/rclone/workflows/build/badge.svg)](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)
# Rclone
Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
Rclone *("rsync for cloud storage")* is a command-line program to sync files and directories to and from different cloud storage providers.
## Storage providers
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
@@ -54,27 +63,45 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
### Virtual storage providers
These backends adapt or modify other storage providers
* Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
* Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
* Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
* Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
* Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
* Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
* Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
* Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
## Features
* MD5/SHA-1 hashes checked at all times for file integrity
@@ -87,10 +114,9 @@ Please see [the full list of all storage providers and their features](https://r
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDAV/FTP/SFTP/DLNA
## Installation & documentation
@@ -111,5 +137,5 @@ Please see the [rclone website](https://rclone.org/) for:
License
-------
This is free software under the terms of MIT the license (check the
This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).


@@ -34,13 +34,32 @@ This file describes how to make the various kinds of releases
* make startdev # make startstable for stable branch
* # announce with forum post, twitter post, patreon post
## Update dependencies
Early in the next release cycle update the dependencies
* Review any pinned packages in go.mod and remove if possible
* make update
* git status
* git add new files
* make updatedirect
* make
* git commit -a -v
* make update
* make
* roll back any updates which didn't compile
* git commit -a -v --amend
Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
## Tidy beta
At some point after the release run
bin/tidy-beta v1.55
where the version number is that of a couple ago to remove old beta binaries.
## Making a point release
@@ -55,8 +74,7 @@ Set vars
First make the release branch. If this is a second point release then
this will be done already.
* git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
* make startstable
Now


@@ -1 +1 @@
v1.56.0
v1.62.0


@@ -1,3 +1,4 @@
// Package alias implements a virtual provider to rename existing remotes.
package alias
import (
@@ -20,7 +21,7 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Help: "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Required: true,
}},
}


@@ -20,7 +20,7 @@ var (
)
func prepare(t *testing.T, root string) {
configfile.LoadConfig(context.Background())
configfile.Install()
// Configure the remote
config.FileSet(remoteName, "type", "alias")


@@ -1,3 +1,4 @@
// Package all imports all the backends
package all
import (
@@ -9,6 +10,7 @@ import (
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
@@ -18,17 +20,21 @@ import (
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hasher"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/memory"
_ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/putio"
@@ -37,9 +43,11 @@ import (
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/smb"
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"


@@ -14,16 +14,15 @@ we ignore assets completely!
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/http"
"path"
"strings"
"time"
acd "github.com/ncw/go-acd"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -70,11 +69,10 @@ func init() {
Prefix: "acd",
Description: "Amazon Drive",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: acdConfig,
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "checkpoint",
@@ -83,16 +81,16 @@ func init() {
Advanced: true,
}, {
Name: "upload_wait_per_gb",
Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
Help: `Additional time per GiB to wait after a failed complete upload to see if it appears.
Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while. This
happens sometimes for files over 1GB in size and nearly every time for
files bigger than 10GB. This parameter controls the time rclone waits
happens sometimes for files over 1 GiB in size and nearly every time for
files bigger than 10 GiB. This parameter controls the time rclone waits
for the file to appear.
The default value for this parameter is 3 minutes per GB, so by
default it will wait 3 minutes for every GB uploaded to see if the
The default value for this parameter is 3 minutes per GiB, so by
default it will wait 3 minutes for every GiB uploaded to see if the
file appears.
You can disable this feature by setting it to 0. This may cause
@@ -112,7 +110,7 @@ in this situation.`,
Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
of files bigger than about 10GB. The default for this is 9GB which
of files bigger than about 10 GiB. The default for this is 9 GiB which
shouldn't need to be changed.
To download files above this threshold, rclone requests a "tempLink"
@@ -261,7 +259,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
return nil, fmt.Errorf("failed to configure Amazon Drive: %w", err)
}
c := acd.NewClient(oAuthClient)
@@ -294,13 +292,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get endpoints")
return nil, fmt.Errorf("failed to get endpoints: %w", err)
}
// Get rootID
rootInfo, err := f.getRootInfo(ctx)
if err != nil || rootInfo.Id == nil {
return nil, errors.Wrap(err, "failed to get root")
return nil, fmt.Errorf("failed to get root: %w", err)
}
f.trueRootID = *rootInfo.Id
@@ -437,7 +435,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, title string, directorie
query += " AND kind:" + folderKind
} else if filesOnly {
query += " AND kind:" + fileKind
} else {
//} else {
// FIXME none of these work
//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
@@ -558,9 +556,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// This is a workaround for Amazon sometimes returning
//
// * 408 REQUEST_TIMEOUT
// * 504 GATEWAY_TIMEOUT
// * 500 Internal server error
// - 408 REQUEST_TIMEOUT
// - 504 GATEWAY_TIMEOUT
// - 500 Internal server error
//
// At the end of large uploads. The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
@@ -628,7 +626,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
// Put the object into the container
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -687,9 +685,9 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1004,7 +1002,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {


@@ -1,5 +1,6 @@
// Test AmazonCloudDrive filesystem interface
//go:build acd
// +build acd
package amazonclouddrive_test

File diff suppressed because it is too large.


@@ -1,4 +1,5 @@
// +build !plan9,!solaris,!js,go1.14
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
package azureblob


@@ -1,11 +1,11 @@
// Test AzureBlob filesystem interface
// +build !plan9,!solaris,!js,go1.14
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
package azureblob
import (
"context"
"testing"
"github.com/rclone/rclone/fs"
@@ -20,7 +20,7 @@ func TestIntegration(t *testing.T) {
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
MinChunkSize: defaultChunkSize,
},
})
}
@@ -33,32 +33,24 @@ var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
)
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
func TestServicePrincipalFileSuccess(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"password": "my secret",
"tenant": "my active directory tenant ID"
}
`
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
if assert.NoError(t, err) {
assert.NotNil(t, tokenRefresher)
func TestValidateAccessTier(t *testing.T) {
tests := map[string]struct {
accessTier string
want bool
}{
"hot": {"hot", true},
"HOT": {"HOT", true},
"Hot": {"Hot", true},
"cool": {"cool", true},
"archive": {"archive", true},
"empty": {"", false},
"unknown": {"unknown", false},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
got := validateAccessTier(test.accessTier)
assert.Equal(t, test.want, got)
})
}
}
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
func TestServicePrincipalFileFailure(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"tenant": "my active directory tenant ID"
}
`
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}


@@ -1,6 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 solaris js !go1.14
//go:build plan9 || solaris || js || !go1.18
// +build plan9 solaris js !go1.18
package azureblob


@@ -1,137 +0,0 @@
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)
// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string
type msiIdentifierType int
const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)
type userMSI struct {
Type msiIdentifierType
Value string
}
type httpError struct {
Response *http.Response
}
func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)
// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()
// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")
// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}
// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, errors.Wrap(err, "MSI is not enabled on this VM")
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, errors.Wrap(err, "Couldn't read IMDS response")
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
}
return result, nil
}


@@ -1,117 +0,0 @@
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}
func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)
// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}
for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}
// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed")
}
}
}
func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}
func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}


@@ -1,3 +1,4 @@
// Package api provides types used by the Backblaze B2 API.
package api
import (
@@ -238,7 +239,7 @@ type GetFileInfoRequest struct {
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
// 10 number number of milliseconds since midnight, January 1, 1970
// 10 number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the
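As a small, hedged illustration of the encoding described in the comment above (this is not the backend's own code), the timestamp is formatted as a base-10 string of milliseconds since the Unix epoch, which fits in a 64 bit integer:
```go
package main

import (
    "fmt"
    "strconv"
    "time"
)

func main() {
    modTime := time.Date(2023, time.March, 14, 12, 0, 0, 0, time.UTC)
    millis := modTime.UnixNano() / int64(time.Millisecond) // 1678795200000
    // Sent as a decimal string, e.g. under the src_last_modified_millis
    // name mentioned in the comment above.
    fmt.Println(strconv.FormatInt(millis, 10))
}
```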


@@ -1,4 +1,4 @@
// Package b2 provides an interface to the Backblaze B2 object storage system
// Package b2 provides an interface to the Backblaze B2 object storage system.
package b2
// FIXME should we remove sha1 checks from here as rclone now supports
@@ -9,6 +9,7 @@ import (
"bytes"
"context"
"crypto/sha1"
"errors"
"fmt"
gohash "hash"
"io"
@@ -19,7 +20,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -54,17 +54,18 @@ const (
decayConstant = 1 // bigger for slower decay, exponential
maxParts = 10000
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
minChunkSize = 5 * fs.MebiByte
defaultChunkSize = 96 * fs.MebiByte
defaultUploadCutoff = 200 * fs.MebiByte
largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
minChunkSize = 5 * fs.Mebi
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
// Globals
var (
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
)
// Register with Fs
@@ -75,15 +76,15 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID",
Help: "Account ID or Application Key ID.",
Required: true,
}, {
Name: "key",
Help: "Application Key",
Help: "Application Key.",
Required: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\nLeave blank normally.",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true,
}, {
Name: "test_mode",
@@ -103,9 +104,14 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Advanced: true,
}, {
Name: "versions",
Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: false,
Advanced: true,
}, {
Name: "version_at",
Help: "Show file versions as they were at the specified time.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: fs.Time{},
Advanced: true,
}, {
Name: "hard_delete",
Help: "Permanently delete files on remote removal, otherwise hide files.",
@@ -116,32 +122,34 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Files above this size will be uploaded in chunks of "--b2-chunk-size".
This value should be set no larger than 4.657GiB (== 5GB).`,
This value should be set no larger than 4.657 GiB (== 5 GB).`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy
Help: `Cutoff for switching to multipart copy.
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 4.6GB.`,
The minimum is 0 and the maximum is 4.6 GiB.`,
Default: largeFileCopyCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size. Must fit in memory.
Help: `Upload chunk size.
When uploading large files, chunk the file into this size. Note that
these chunks are buffered in memory and there might a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
minimum size.`,
When uploading large files, chunk the file into this size.
Must fit in memory. These chunks are buffered in memory and there
might be a maximum of "--transfers" chunks in progress at once.
5,000,000 Bytes is the minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files
Help: `Disable checksums for large (> upload cutoff) files.
Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
@@ -158,7 +166,15 @@ free egress for data downloaded through the Cloudflare network.
Rclone works with private buckets by sending an "Authorization" header.
If the custom endpoint rewrites the requests for authentication,
e.g., in Cloudflare Workers, this header needs to be handled properly.
Leave blank if you want to use the endpoint provided by Backblaze.`,
Leave blank if you want to use the endpoint provided by Backblaze.
The URL provided here SHOULD have the protocol and SHOULD NOT have
a trailing slash or specify the /file/bucket subpath as rclone will
request files with "{download_url}/file/{bucket_name}/{path}".
Example:
> https://mysubdomain.mydomain.tld
(No trailing "/", "file" or "bucket")`,
Advanced: true,
}, {
Name: "download_auth_duration",
@@ -201,6 +217,7 @@ type Options struct {
Endpoint string `config:"endpoint"`
TestMode string `config:"test_mode"`
Versions bool `config:"versions"`
VersionAt fs.Time `config:"version_at"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
@@ -263,7 +280,7 @@ func (f *Fs) Root() string {
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return fmt.Sprintf("B2 root")
return "B2 root"
}
if f.rootDirectory == "" {
return fmt.Sprintf("B2 bucket %s", f.rootBucket)
@@ -364,7 +381,7 @@ func errorHandler(resp *http.Response) error {
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -379,7 +396,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
if cs < opt.ChunkSize {
return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
return fmt.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
}
return nil
}
@@ -412,11 +429,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = checkUploadCutoff(opt, opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "b2: upload cutoff")
return nil, fmt.Errorf("b2: upload cutoff: %w", err)
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "b2: chunk size")
return nil, fmt.Errorf("b2: chunk size: %w", err)
}
if opt.Account == "" {
return nil, errors.New("account not found")
@@ -461,7 +478,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = f.authorizeAccount(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to authorize account")
return nil, fmt.Errorf("failed to authorize account: %w", err)
}
// If this is a key limited to a single bucket, it must exist already
if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
@@ -470,7 +487,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
if allowedBucket != f.rootBucket {
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
}
f.cache.MarkOK(f.rootBucket)
f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
@@ -510,7 +527,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
return f.shouldRetryNoReauth(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to authenticate")
return fmt.Errorf("failed to authenticate: %w", err)
}
f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
return nil
@@ -556,7 +573,7 @@ func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUp
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
return upload, nil
}
@@ -639,15 +656,15 @@ var errEndList = errors.New("end list")
//
// (bucket, directory) is the starting directory
//
// If prefix is set then it is removed from all file names
// If prefix is set then it is removed from all file names.
//
// If addBucket is set then it adds the bucket to the start of the
// remotes generated
// remotes generated.
//
// If recurse is set the function will recursively list
// If recurse is set the function will recursively list.
//
// If limit is > 0 then it limits to that many files (must be less
// than 1000)
// than 1000).
//
// If hidden is set then it will list the hidden (deleted) files too.
//
@@ -686,9 +703,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
Method: "POST",
Path: "/b2_list_file_names",
}
if hidden {
if hidden || f.opt.VersionAt.IsSet() {
opts.Path = "/b2_list_file_versions"
}
lastFileName := ""
for {
var response api.ListFileNamesResponse
err := f.pacer.Call(func() (bool, error) {
@@ -718,7 +738,21 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if addBucket {
remote = path.Join(bucket, remote)
}
if f.opt.VersionAt.IsSet() {
if time.Time(file.UploadTimestamp).After(time.Time(f.opt.VersionAt)) {
// Ignore versions that were created after the specified time
continue
}
if file.Name == lastFileName {
// Ignore versions before the already returned version
continue
}
}
// Send object
lastFileName = file.Name
err = fn(remote, file, isDirectory)
if err != nil {
if err == errEndList {
@@ -991,7 +1025,7 @@ func (f *Fs) clearBucketID(bucket string) {
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1046,7 +1080,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
}
}
}
return errors.Wrap(err, "failed to create bucket")
return fmt.Errorf("failed to create bucket: %w", err)
}
f.setBucketID(bucket, response.ID)
f.setBucketType(bucket, response.Type)
@@ -1081,7 +1115,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to delete bucket")
return fmt.Errorf("failed to delete bucket: %w", err)
}
f.clearBucketID(bucket)
f.clearBucketType(bucket)
@@ -1122,7 +1156,7 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
return nil
}
}
return errors.Wrapf(err, "failed to hide %q", bucketPath)
return fmt.Errorf("failed to hide %q: %w", bucketPath, err)
}
return nil
}
@@ -1143,7 +1177,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrapf(err, "failed to delete %q", Name)
return fmt.Errorf("failed to delete %q: %w", Name, err)
}
return nil
}
@@ -1171,10 +1205,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
}
}
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
if time.Since(time.Time(timestamp)).Hours() > 24 {
return true
}
return false
return time.Since(time.Time(timestamp)).Hours() > 24
}
// Delete Config.Transfers in parallel
@@ -1190,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
fs.Errorf(object.Name, "Can't create object %v", err)
continue
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err)
tr.Done(ctx, err)
@@ -1204,7 +1235,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
if err != nil {
fs.Errorf(object, "Can't create object %+v", err)
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
if oldOnly && last != remote {
// Check current version of the file
if object.Action == "hide" {
@@ -1303,9 +1334,9 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1362,7 +1393,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return "", errors.Wrap(err, "failed to get download authorization")
return "", fmt.Errorf("failed to get download authorization: %w", err)
}
return response.AuthorizationToken, nil
}
@@ -1447,26 +1478,23 @@ func (o *Object) Size() int64 {
// Clean the SHA1
//
// Make sure it is lower case
// Make sure it is lower case.
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (e.g. Cyberduck) use this
func cleanSHA1(sha1 string) (out string) {
out = strings.ToLower(sha1)
func cleanSHA1(sha1 string) string {
const unverified = "unverified:"
if strings.HasPrefix(out, unverified) {
out = out[len(unverified):]
}
return out
return strings.TrimPrefix(strings.ToLower(sha1), unverified)
}
// decodeMetaDataRaw sets the metadata from the data passed in
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
//
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string, mimeType string) (err error) {
o.id = ID
o.sha1 = SHA1
@@ -1485,10 +1513,11 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
// decodeMetaData sets the metadata in the object from an api.File
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
//
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) decodeMetaData(info *api.File) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}
@@ -1496,10 +1525,11 @@ func (o *Object) decodeMetaData(info *api.File) (err error) {
// decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
//
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}
@@ -1557,10 +1587,11 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
//
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.id != "" {
return nil
@@ -1667,14 +1698,14 @@ func (file *openFile) Close() (err error) {
// Check to see we read the correct number of bytes
if file.o.Size() != file.bytes {
return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
}
// Check the SHA1
receivedSHA1 := file.o.sha1
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
}
return nil
@@ -1714,7 +1745,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
return nil, nil, fs.ErrorObjectNotFound
}
return nil, nil, errors.Wrapf(err, "failed to %s for download", method)
return nil, nil, fmt.Errorf("failed to %s for download: %w", method, err)
}
// NB resp may be Open here - don't return err != nil without closing
@@ -1818,6 +1849,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if o.fs.opt.Versions {
return errNotWithVersions
}
if o.fs.opt.VersionAt.IsSet() {
return errNotWithVersionAt
}
size := src.Size()
bucket, bucketPath := o.split()
@@ -1973,6 +2007,9 @@ func (o *Object) Remove(ctx context.Context) error {
if o.fs.opt.Versions {
return errNotWithVersions
}
if o.fs.opt.VersionAt.IsSet() {
return errNotWithVersionAt
}
if o.fs.opt.HardDelete {
return o.fs.deleteByID(ctx, o.id, bucketPath)
}
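
To make the new --b2-version-at behaviour concrete, here is an illustrative, self-contained sketch (types are stand-ins, not the backend's api.File) of the selection rule the list() change above applies to the output of b2_list_file_versions:

// versionat_sketch.go - illustrative only, not part of this changeset.
package sketch

import "time"

// version stands in for api.File with just the fields the filter needs.
type version struct {
	Name     string
	Uploaded time.Time
}

// selectAt keeps, for each name, the newest version not uploaded after
// 'at' and drops everything else, assuming the input is newest-first
// within each name as b2_list_file_versions returns it.
func selectAt(at time.Time, in []version) (out []version) {
	last := ""
	for _, v := range in {
		if v.Uploaded.After(at) {
			continue // created after the requested point in time
		}
		if v.Name == last {
			continue // older than the version already kept for this name
		}
		last = v.Name
		out = append(out, v)
	}
	return out
}

Writes and deletes stay blocked in this mode via errNotWithVersionAt, as the Update and Remove hunks above show.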


@@ -14,13 +14,15 @@ import (
"io"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
)
@@ -89,21 +91,19 @@ type largeUpload struct {
// newLargeUpload starts an upload of object o from in with metadata in src
//
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
remote := o.remote
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
size := src.Size()
parts := int64(0)
sha1SliceSize := int64(maxParts)
chunkSize := defaultChunkSize
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
if parts > maxParts {
return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
sha1SliceSize = parts
}
@@ -185,7 +185,7 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
@@ -230,14 +230,14 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
//
// The number of bytes in the file being uploaded. Note that
// this header is required; you cannot leave it out and just
// use chunked encoding. The minimum size of every part but
// the last one is 100MB.
// use chunked encoding. The minimum size of every part but
// the last one is 100 MB (100,000,000 bytes)
//
// X-Bz-Content-Sha1
//
// The SHA1 checksum of this part of the file. B2 will
// check this when the part is uploaded, to make sure that the
// data arrived correctly. The same SHA1 checksum must be
// data arrived correctly. The same SHA1 checksum must be
// passed to b2_finish_large_file.
opts := rest.Opts{
Method: "POST",
@@ -406,7 +406,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
@@ -430,18 +430,47 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
uploadPool *pool.Pool
ci = fs.GetConfig(ctx)
)
// If using large chunk size then make a temporary pool
if up.chunkSize <= int64(up.f.opt.ChunkSize) {
uploadPool = up.f.pool
} else {
uploadPool = pool.New(
time.Duration(up.f.opt.MemoryPoolFlushTime),
int(up.chunkSize),
ci.Transfers,
up.f.opt.MemoryPoolUseMmap,
)
defer uploadPool.Flush()
}
// Get an upload token and a buffer
getBuf := func() (buf []byte) {
up.f.getBuf(true)
if !up.doCopy {
buf = uploadPool.Get()
}
return buf
}
// Put an upload token and a buffer
putBuf := func(buf []byte) {
if !up.doCopy {
uploadPool.Put(buf)
}
up.f.putBuf(nil, true)
}
g.Go(func() error {
for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := up.f.getBuf(up.doCopy)
buf := getBuf()
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, up.doCopy)
putBuf(buf)
return nil
}
@@ -455,14 +484,14 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putBuf(buf, up.doCopy)
putBuf(buf)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, up.doCopy)
defer putBuf(buf)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
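
The Upload() change above switches to a temporary buffer pool when the calculated chunk size no longer fits the shared one (chunksize.Calculator may pick a larger chunk than --b2-chunk-size for very large files). A hedged sketch of that decision, using the same lib/pool calls as the diff (the helper name is invented):

// pool_sketch.go - illustrative only, not part of this changeset.
package sketch

import (
	"time"

	"github.com/rclone/rclone/lib/pool"
)

// pickPool returns the shared Fs pool when the upload's chunk size still
// fits it, otherwise a short-lived pool sized for this upload. The returned
// cleanup flushes a temporary pool and is a no-op for the shared one.
func pickPool(shared *pool.Pool, sharedChunk, uploadChunk int, flush time.Duration, transfers int, useMmap bool) (*pool.Pool, func()) {
	if uploadChunk <= sharedChunk {
		return shared, func() {}
	}
	p := pool.New(flush, uploadChunk, transfers, useMmap)
	return p, func() { p.Flush() }
}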


@@ -14,7 +14,7 @@ const (
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents represents date and time information for the
// Time represents date and time information for the
// box API, by using RFC3339
type Time time.Time
@@ -61,7 +61,7 @@ func (e *Error) Error() string {
var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
// Types of things in Item
const (
@@ -90,6 +90,12 @@ type Item struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
OwnedBy struct {
Type string `json:"type"`
ID string `json:"id"`
Name string `json:"name"`
Login string `json:"login"`
} `json:"owned_by"`
}
// ModTime returns the modification time of the item
@@ -103,10 +109,11 @@ func (i *Item) ModTime() (t time.Time) {
// FolderItems is returned from the GetFolderItems call
type FolderItems struct {
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
NextMarker *string `json:"next_marker,omitempty"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`


@@ -14,24 +14,19 @@ import (
"crypto/rsa"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/jwtutil"
"github.com/youmark/pkcs8"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -42,9 +37,13 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/jwtutil"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
)
@@ -57,7 +56,6 @@ const (
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api.box.com/2.0"
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024
tokenURL = "https://api.box.com/oauth2/token"
@@ -84,7 +82,7 @@ func init() {
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
@@ -93,15 +91,15 @@ func init() {
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
}
return nil, nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",
@@ -110,23 +108,23 @@ func init() {
Advanced: true,
}, {
Name: "box_config_file",
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\nLeave blank normally.",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
Examples: []fs.OptionExample{{
Value: "user",
Help: "Rclone should act on behalf of a user",
Help: "Rclone should act on behalf of a user.",
}, {
Value: "enterprise",
Help: "Rclone should act on behalf of a service account",
Help: "Rclone should act on behalf of a service account.",
}},
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50MB).",
Help: "Cutoff for switching to multipart upload (>= 50 MiB).",
Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true,
}, {
@@ -134,6 +132,16 @@ func init() {
Help: "Max number of times to try committing a multipart file.",
Default: 100,
Advanced: true,
}, {
Name: "list_chunk",
Default: 1000,
Help: "Size of listing chunk 1-1000.",
Advanced: true,
}, {
Name: "owned_by",
Default: "",
Help: "Only show items owned by the login (email address) passed in.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -157,15 +165,15 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return fmt.Errorf("get box config: %w", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return fmt.Errorf("get decrypted private key: %w", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return fmt.Errorf("get claims: %w", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
@@ -175,13 +183,13 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
}
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
file, err := os.ReadFile(configFile)
if err != nil {
return nil, errors.Wrap(err, "box: failed to read Box config")
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}
err = json.Unmarshal(file, &boxConfig)
if err != nil {
return nil, errors.Wrap(err, "box: failed to parse Box config")
return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
}
return boxConfig, nil
}
@@ -189,7 +197,7 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
return nil, errors.Wrap(err, "box: failed to generate random string for jti")
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
}
claims = &jws.ClaimSet{
@@ -230,12 +238,12 @@ func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if len(rest) > 0 {
return nil, errors.Wrap(err, "box: extra data included in private key")
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
}
rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
if err != nil {
return nil, errors.Wrap(err, "box: failed to decrypt private key")
return nil, fmt.Errorf("box: failed to decrypt private key: %w", err)
}
return rsaKey.(*rsa.PrivateKey), nil
@@ -248,6 +256,8 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"`
}
// Fs represents a remote box
@@ -256,7 +266,7 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
@@ -327,6 +337,13 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}
// Box API errors which should be retried
if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "operation_blocked_temporary" {
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -341,7 +358,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
if strings.EqualFold(item.Name, leaf) {
info = item
return true
@@ -384,7 +401,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
if opt.UploadCutoff < minUploadCutoff {
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
return nil, fmt.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
}
root = parsePath(root)
@@ -395,7 +412,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.AccessToken == "" {
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
return nil, fmt.Errorf("failed to configure Box: %w", err)
}
}
@@ -516,7 +533,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
found, err = f.listAll(ctx, pathID, true, false, true, func(item *api.Item) bool {
if strings.EqualFold(item.Name, leaf) {
pathIDOut = item.ID
return true
@@ -572,17 +589,20 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, activeOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/" + dirID + "/items",
Parameters: fieldsValue(),
}
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
opts.Parameters.Set("limit", strconv.Itoa(f.opt.ListChunk))
opts.Parameters.Set("usemarker", "true")
var marker *string
OUTER:
for {
opts.Parameters.Set("offset", strconv.Itoa(offset))
if marker != nil {
opts.Parameters.Set("marker", *marker)
}
var result api.FolderItems
var resp *http.Response
@@ -591,7 +611,7 @@ OUTER:
return shouldRetry(ctx, resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
return found, fmt.Errorf("couldn't list files: %w", err)
}
for i := range result.Entries {
item := &result.Entries[i]
@@ -607,7 +627,10 @@ OUTER:
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
if item.ItemStatus != api.ItemStatusActive {
if activeOnly && item.ItemStatus != api.ItemStatusActive {
continue
}
if f.opt.OwnedBy != "" && f.opt.OwnedBy != item.OwnedBy.Login {
continue
}
item.Name = f.opt.Enc.ToStandardName(item.Name)
@@ -616,8 +639,8 @@ OUTER:
break OUTER
}
}
offset += result.Limit
if offset >= result.TotalCount {
marker = result.NextMarker
if marker == nil {
break
}
}
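
The listAll() hunk above replaces offset/limit paging with Box's marker-based paging: request with usemarker=true, then keep passing back next_marker until the server omits it. An illustrative, self-contained sketch of that loop shape (types and names are stand-ins):

// paging_sketch.go - illustrative only, not part of this changeset.
package sketch

// page stands in for api.FolderItems: entries plus an optional next_marker,
// which is nil on the last page.
type page struct {
	Entries    []string
	NextMarker *string
}

// forEachItem walks every page, calling fn on each entry and stopping early
// if fn returns true, mirroring listAll's found semantics.
func forEachItem(fetch func(marker *string) (page, error), fn func(string) bool) error {
	var marker *string
	for {
		p, err := fetch(marker)
		if err != nil {
			return err
		}
		for _, e := range p.Entries {
			if fn(e) {
				return nil // early exit
			}
		}
		marker = p.NextMarker
		if marker == nil {
			return nil
		}
	}
}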
@@ -639,7 +662,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.Type == api.ItemTypeFolder {
// cache the directory ID for later lookups
@@ -669,7 +692,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -715,21 +738,21 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
var conflict api.PreUploadCheckConflict
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
if err != nil {
return "", errors.Wrap(err, "pre-upload check: JSON decode failed")
return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
}
if conflict.Conflicts.Type != api.ItemTypeFile {
return "", errors.Wrap(err, "pre-upload check: can't overwrite non file with file")
return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
}
return conflict.Conflicts.ID, nil
}
return "", errors.Wrap(err, "pre-upload check")
return "", fmt.Errorf("pre-upload check: %w", err)
}
return "", nil
}
// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -769,9 +792,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// PutUnchecked the object into the container
//
// This will produce an error if the object already exists
// This will produce an error if the object already exists.
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -831,7 +854,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
return fmt.Errorf("rmdir failed: %w", err)
}
f.dirCache.FlushDir(dir)
if err != nil {
@@ -854,9 +877,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -874,8 +897,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
if strings.EqualFold(srcPath, dstPath) {
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
// Create temporary object
@@ -959,7 +982,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to read user info")
return nil, fmt.Errorf("failed to read user info: %w", err)
}
// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
@@ -972,9 +995,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1093,45 +1116,36 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/trash/items",
Parameters: url.Values{
"fields": []string{"type", "id"},
},
}
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
for {
opts.Parameters.Set("offset", strconv.Itoa(offset))
var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list trash")
}
for i := range result.Entries {
item := &result.Entries[i]
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
var (
deleteErrors = int64(0)
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
wg sync.WaitGroup
)
_, err = f.listAll(ctx, "trash", false, false, false, func(item *api.Item) bool {
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
wg.Add(1)
concurrencyControl <- struct{}{}
go func() {
defer func() {
<-concurrencyControl
wg.Done()
}()
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
return errors.Wrap(err, "failed to delete file")
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
atomic.AddInt64(&deleteErrors, 1)
}
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
}
offset += result.Limit
if offset >= result.TotalCount {
break
}()
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
}
return false
})
wg.Wait()
if deleteErrors != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors)
}
return
return err
}
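
The rewritten CleanUp() above deletes trash entries concurrently: a buffered channel bounds the number of in-flight deletions to the configured checkers, a WaitGroup waits for them, and failures are counted atomically and reported once at the end. A self-contained sketch of that pattern (names are illustrative):

// cleanup_sketch.go - illustrative only, not part of this changeset.
package sketch

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// deleteAll runs del for every id with at most checkers goroutines in
// flight and returns a single aggregate error if any deletion failed.
func deleteAll(ids []string, checkers int, del func(id string) error) error {
	var (
		failures int64
		sem      = make(chan struct{}, checkers)
		wg       sync.WaitGroup
	)
	for _, id := range ids {
		id := id
		wg.Add(1)
		sem <- struct{}{}
		go func() {
			defer func() {
				<-sem
				wg.Done()
			}()
			if err := del(id); err != nil {
				atomic.AddInt64(&failures, 1)
			}
		}()
	}
	wg.Wait()
	if failures != 0 {
		return fmt.Errorf("failed to delete %d trash items", failures)
	}
	return nil
}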
// DirCacheFlush resets the directory cache - used in testing as an
@@ -1185,8 +1199,11 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type == api.ItemTypeFolder {
return fs.ErrorIsDir
}
if info.Type != api.ItemTypeFile {
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
}
o.hasMetaData = true
o.size = int64(info.Size)
@@ -1218,7 +1235,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1286,7 +1302,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
// This is recommended for less than 50 MiB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
upload := api.UploadFile{
Name: o.fs.opt.Enc.FromStandardName(leaf),
@@ -1322,16 +1338,16 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
return err
}
if result.TotalCount != 1 || len(result.Entries) != 1 {
return errors.Errorf("failed to upload %v - not sure why", o)
return fmt.Errorf("failed to upload %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.fs.tokenRenewer != nil {
o.fs.tokenRenewer.Start()


@@ -8,6 +8,7 @@ import (
"crypto/sha1"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@@ -15,7 +16,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -140,7 +140,7 @@ outer:
}
}
default:
return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
return nil, fmt.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
}
}
fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
@@ -151,7 +151,7 @@ outer:
}
err = json.Unmarshal(body, &result)
if err != nil {
return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
return nil, fmt.Errorf("couldn't decode commit response: %q: %w", body, err)
}
return result, nil
}
@@ -177,7 +177,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
if err != nil {
return errors.Wrap(err, "multipart upload create session failed")
return fmt.Errorf("multipart upload create session failed: %w", err)
}
chunkSize := session.PartSize
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
@@ -222,7 +222,7 @@ outer:
// Read the chunk
_, err = io.ReadFull(in, buf)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to read source")
err = fmt.Errorf("multipart upload failed to read source: %w", err)
break outer
}
@@ -238,7 +238,7 @@ outer:
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
err = fmt.Errorf("multipart upload failed to upload part: %w", err)
select {
case errs <- err:
default:
@@ -266,11 +266,11 @@ outer:
// Finalise the upload session
result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalize")
return fmt.Errorf("multipart upload failed to finalize: %w", err)
}
if result.TotalCount != 1 || len(result.Entries) != 1 {
return errors.Errorf("multipart upload failed %v - not sure why", o)
return fmt.Errorf("multipart upload failed %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}

backend/cache/cache.go vendored

@@ -1,9 +1,12 @@
//go:build !plan9 && !js
// +build !plan9,!js
// Package cache implements a virtual provider to cache existing remotes.
package cache
import (
"context"
"errors"
"fmt"
"io"
"math"
@@ -18,7 +21,6 @@ import (
"syscall"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
@@ -68,26 +70,26 @@ func init() {
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "plex_url",
Help: "The URL of the Plex server",
Help: "The URL of the Plex server.",
}, {
Name: "plex_username",
Help: "The username of the Plex user",
Help: "The username of the Plex user.",
}, {
Name: "plex_password",
Help: "The password of the Plex user",
Help: "The password of the Plex user.",
IsPassword: true,
}, {
Name: "plex_token",
Help: "The plex token for authentication - auto set normally",
Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth,
Advanced: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server",
Help: "Skip all certificate verification when connecting to the Plex server.",
Advanced: true,
}, {
Name: "chunk_size",
@@ -98,14 +100,14 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
will need to be cleared or unexpected EOF errors will occur.`,
Default: DefCacheChunkSize,
Examples: []fs.OptionExample{{
Value: "1m",
Help: "1MB",
Value: "1M",
Help: "1 MiB",
}, {
Value: "5M",
Help: "5 MB",
Help: "5 MiB",
}, {
Value: "10M",
Help: "10 MB",
Help: "10 MiB",
}},
}, {
Name: "info_age",
@@ -132,22 +134,22 @@ oldest chunks until it goes under this value.`,
Default: DefCacheTotalChunkSize,
Examples: []fs.OptionExample{{
Value: "500M",
Help: "500 MB",
Help: "500 MiB",
}, {
Value: "1G",
Help: "1 GB",
Help: "1 GiB",
}, {
Value: "10G",
Help: "10 GB",
Help: "10 GiB",
}},
}, {
Name: "db_path",
Default: filepath.Join(config.CacheDir, "cache-backend"),
Help: "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
Help: "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
Advanced: true,
}, {
Name: "chunk_path",
Default: filepath.Join(config.CacheDir, "cache-backend"),
Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
Help: `Directory to cache chunk files.
Path to where partial file data (chunks) are stored locally. The remote
@@ -167,6 +169,7 @@ then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
Name: "chunk_clean_interval",
Default: DefCacheChunkCleanInterval,
Help: `How often should the cache perform cleanups of the chunk storage.
The default value should be ok for most people. If you find that the
cache goes over "cache-chunk-total-size" too often then try to lower
this value to force it to perform cleanups more often.`,
@@ -220,7 +223,7 @@ available on the local machine.`,
}, {
Name: "rps",
Default: int(DefCacheRps),
Help: `Limits the number of requests per second to the source FS (-1 to disable)
Help: `Limits the number of requests per second to the source FS (-1 to disable).
This setting places a hard limit on the number of requests per second
that cache will be doing to the cloud provider remote and try to
@@ -241,7 +244,7 @@ still pass.`,
}, {
Name: "writes",
Default: DefCacheWrites,
Help: `Cache file data on writes through the FS
Help: `Cache file data on writes through the FS.
If you need to read files immediately after you upload them through
cache you can enable this flag to have their data stored in the
@@ -262,7 +265,7 @@ provider`,
}, {
Name: "tmp_wait_time",
Default: DefCacheTmpWaitTime,
Help: `How long should files be stored in local cache before being uploaded
Help: `How long should files be stored in local cache before being uploaded.
This is the duration that a file must wait in the temporary location
_cache-tmp-upload-path_ before it is selected for upload.
@@ -273,7 +276,7 @@ to start the upload if a queue formed for this purpose.`,
}, {
Name: "db_wait_time",
Default: DefCacheDbWaitTime,
Help: `How long to wait for the DB to be available - 0 is unlimited
Help: `How long to wait for the DB to be available - 0 is unlimited.
Only one process can have the DB open at any one time, so rclone waits
for this duration for the DB to become available before it gives an
@@ -339,8 +342,14 @@ func parseRootPath(path string) (string, error) {
return strings.Trim(path, "/"), nil
}
var warnDeprecated sync.Once
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
warnDeprecated.Do(func() {
fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
})
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -348,7 +357,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
return nil, err
}
if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
}
@@ -358,13 +367,13 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
rpath, err := parseRootPath(rootPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
}
remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
wrappedFs, wrapErr := cache.Get(ctx, remotePath)
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
}
var fsErr error
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -386,14 +395,18 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
notifiedRemotes: make(map[string]bool),
}
cache.PinUntilFinalized(f.Fs, f)
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
rps := rate.Inf
if opt.Rps > 0 {
rps = rate.Limit(float64(opt.Rps))
}
f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers)
f.plexConnector = &plexConnector{}
if opt.PlexURL != "" {
if opt.PlexToken != "" {
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
if err != nil {
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
} else {
if opt.PlexPassword != "" && opt.PlexUsername != "" {
@@ -405,7 +418,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
m.Set("plex_token", token)
})
if err != nil {
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
}
}
@@ -414,8 +427,8 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
dbPath := f.opt.DbPath
chunkPath := f.opt.ChunkPath
// if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
if dbPath != filepath.Join(config.GetCacheDir(), "cache-backend") &&
chunkPath == filepath.Join(config.GetCacheDir(), "cache-backend") {
chunkPath = dbPath
}
if filepath.Ext(dbPath) != "" {
@@ -426,11 +439,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
}
err = os.MkdirAll(dbPath, os.ModePerm)
if err != nil {
return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
}
err = os.MkdirAll(chunkPath, os.ModePerm)
if err != nil {
return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
}
dbPath = filepath.Join(dbPath, name+".db")
@@ -442,7 +455,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
DbWaitTime: time.Duration(opt.DbWaitTime),
})
if err != nil {
return nil, errors.Wrapf(err, "failed to start cache db")
return nil, fmt.Errorf("failed to start cache db: %w", err)
}
// Trap SIGINT and SIGTERM to close the DB handle gracefully
c := make(chan os.Signal, 1)
@@ -476,12 +489,12 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if f.opt.TempWritePath != "" {
err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
if err != nil {
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
}
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
if err != nil {
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
return nil, fmt.Errorf("failed to create temp fs: %w", err)
}
fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
@@ -598,7 +611,7 @@ func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err er
out = make(rc.Params)
m, err := f.Stats()
if err != nil {
return out, errors.Errorf("error while getting cache stats")
return out, fmt.Errorf("error while getting cache stats")
}
out["status"] = "ok"
out["stats"] = m
@@ -625,7 +638,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
out = make(rc.Params)
remoteInt, ok := in["remote"]
if !ok {
return out, errors.Errorf("remote is needed")
return out, fmt.Errorf("remote is needed")
}
remote := remoteInt.(string)
withData := false
@@ -636,7 +649,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
remote = f.unwrapRemote(remote)
if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
return out, errors.Errorf("%s doesn't exist in cache", remote)
return out, fmt.Errorf("%s doesn't exist in cache", remote)
}
co := NewObject(f, remote)
@@ -645,7 +658,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
cd := NewDirectory(f, remote)
err := f.cache.ExpireDir(cd)
if err != nil {
return out, errors.WithMessage(err, "error expiring directory")
return out, fmt.Errorf("error expiring directory: %w", err)
}
// notify vfs too
f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
@@ -656,7 +669,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
// expire the entry
err = f.cache.ExpireObject(co, withData)
if err != nil {
return out, errors.WithMessage(err, "error expiring file")
return out, fmt.Errorf("error expiring file: %w", err)
}
// notify vfs too
f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
@@ -677,24 +690,24 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
case 1:
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
end = start + 1
case 2:
if ints[0] != "" {
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
}
if ints[1] != "" {
end, err = strconv.ParseInt(ints[1], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
}
default:
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
crs = append(crs, chunkRange{start: start, end: end})
}
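The hunk above only shows the per-part parsing, so here is a minimal standalone sketch of the same range convention: a bare number selects one chunk, "start:end" selects a half-open range, and either side may be omitted. The colon delimiter and the defaults are assumptions for illustration, and none of these names are rclone's own.

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    type chunkRange struct{ start, end int64 }

    // parseChunk mirrors the switch in the diff: one integer means a single
    // chunk, two fields mean an optional start and an optional end.
    func parseChunk(part string) (chunkRange, error) {
        ints := strings.Split(part, ":")
        cr := chunkRange{start: 0, end: -1} // -1 here stands for "to the last chunk"
        var err error
        switch len(ints) {
        case 1:
            cr.start, err = strconv.ParseInt(ints[0], 10, 64)
            cr.end = cr.start + 1
        case 2:
            if ints[0] != "" {
                cr.start, err = strconv.ParseInt(ints[0], 10, 64)
            }
            if err == nil && ints[1] != "" {
                cr.end, err = strconv.ParseInt(ints[1], 10, 64)
            }
        default:
            err = fmt.Errorf("invalid range: %q", part)
        }
        return cr, err
    }

    func main() {
        for _, p := range []string{"3", "2:5", ":4", "7:"} {
            fmt.Println(parseChunk(p))
        }
    }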
@@ -749,18 +762,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
delete(in, "chunks")
crs, err := parseChunks(s)
if err != nil {
return nil, errors.Wrap(err, "invalid chunks parameter")
return nil, fmt.Errorf("invalid chunks parameter: %w", err)
}
var files [][2]string
for k, v := range in {
if !strings.HasPrefix(k, "file") {
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
}
switch v := v.(type) {
case string:
files = append(files, [2]string{v, f.unwrapRemote(v)})
default:
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
}
}
type fileStatus struct {
@@ -1025,7 +1038,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
fs.Debugf(dir, "list: remove entry: %v", entryRemote)
}
entries = nil
entries = nil //nolint:ineffassign
// and then iterate over the ones from source (temp Objects will override source ones)
var batchDirectories []*Directory
@@ -1116,7 +1129,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
case fs.Directory:
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
default:
return errors.Errorf("Unknown object type %T", entry)
return fmt.Errorf("unknown object type %T", entry)
}
}
@@ -1735,7 +1748,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("About not supported")
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
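Most hunks in this file follow one pattern: calls into github.com/pkg/errors (Wrapf, Errorf, WithMessage) are replaced by the standard library, using fmt.Errorf with the %w verb wherever the original error should stay inspectable. A minimal sketch of why %w matters; the function name, path and message are illustrative only:

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
        "os"
    )

    // openCacheDB stands in for the kind of call being wrapped above.
    func openCacheDB(path string) error {
        if _, err := os.Stat(path); err != nil {
            // %w wraps err so callers can still match it with errors.Is/errors.As;
            // %v would only keep its text.
            return fmt.Errorf("failed to start cache db: %w", err)
        }
        return nil
    }

    func main() {
        err := openCacheDB("/no/such/cache.db")
        fmt.Println(err)
        fmt.Println(errors.Is(err, fs.ErrNotExist)) // true, thanks to %w
    }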

View File

@@ -1,5 +1,5 @@
// +build !plan9,!js
// +build !race
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test
@@ -7,21 +7,21 @@ import (
"bytes"
"context"
"encoding/base64"
"errors"
goflag "flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"strings"
"testing"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
@@ -101,14 +101,12 @@ func TestMain(m *testing.M) {
func TestInternalListRootAndInnerRemotes(t *testing.T) {
id := fmt.Sprintf("tilrair%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
// Instantiate inner fs
innerFolder := "inner"
runInstance.mkdir(t, rootFs, innerFolder)
rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs2, boltDb2)
rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)
runInstance.writeObjectString(t, rootFs2, "one", "content")
listRoot, err := runInstance.list(t, rootFs, "")
@@ -166,7 +164,7 @@ func TestInternalVfsCache(t *testing.T) {
li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
for _, r := range li2 {
var err error
ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
if err != nil || len(ci) == 0 {
log.Printf("========== '%v' not in cache", r)
} else {
@@ -225,8 +223,7 @@ func TestInternalVfsCache(t *testing.T) {
func TestInternalObjWrapFsFound(t *testing.T) {
id := fmt.Sprintf("tiowff%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -258,8 +255,7 @@ func TestInternalObjWrapFsFound(t *testing.T) {
func TestInternalObjNotFound(t *testing.T) {
id := fmt.Sprintf("tionf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
obj, err := rootFs.NewObject(context.Background(), "404")
require.Error(t, err)
@@ -269,8 +265,7 @@ func TestInternalObjNotFound(t *testing.T) {
func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -293,9 +288,11 @@ func TestInternalCachedWrittenContentMatches(t *testing.T) {
}
func TestInternalDoubleWrittenContentMatches(t *testing.T) {
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
// write the object
runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -313,8 +310,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticucm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
var err error
// create some rand test data
@@ -343,8 +339,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
}
@@ -374,8 +369,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
func TestInternalLargeWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
}
@@ -401,8 +395,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -442,7 +435,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
return err
}
if coSize != expectedSize {
return errors.Errorf("%v <> %v", coSize, expectedSize)
return fmt.Errorf("%v <> %v", coSize, expectedSize)
}
return nil
}, 12, time.Second*10)
@@ -456,8 +449,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
func TestInternalMoveWithNotify(t *testing.T) {
id := fmt.Sprintf("timwn%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
@@ -498,7 +490,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 2 {
log.Printf("not expected listing /test: %v", li)
return errors.Errorf("not expected listing /test: %v", li)
return fmt.Errorf("not expected listing /test: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/one")
@@ -508,7 +500,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 0 {
log.Printf("not expected listing /test/one: %v", li)
return errors.Errorf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/second")
@@ -518,21 +510,21 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 1 {
log.Printf("not expected listing /test/second: %v", li)
return errors.Errorf("not expected listing /test/second: %v", li)
return fmt.Errorf("not expected listing /test/second: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "data.bin" {
log.Printf("not expected name: %v", fi.Name())
return errors.Errorf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/second/data.bin" {
log.Printf("not expected remote: %v", di.Remote())
return errors.Errorf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
return errors.Errorf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
log.Printf("complete listing: %v", li)
@@ -543,8 +535,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
id := fmt.Sprintf("tincep%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
@@ -587,17 +578,17 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
if !found {
log.Printf("not found /test")
return errors.Errorf("not found /test")
return fmt.Errorf("not found /test")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
if !found {
log.Printf("not found /test/one")
return errors.Errorf("not found /test/one")
return fmt.Errorf("not found /test/one")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
if !found {
log.Printf("not found /test/one/test2")
return errors.Errorf("not found /test/one/test2")
return fmt.Errorf("not found /test/one/test2")
}
li, err := runInstance.list(t, rootFs, "test/one")
if err != nil {
@@ -606,21 +597,21 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
}
if len(li) != 1 {
log.Printf("not expected listing /test/one: %v", li)
return errors.Errorf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "test2" {
log.Printf("not expected name: %v", fi.Name())
return errors.Errorf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/one/test2" {
log.Printf("not expected remote: %v", di.Remote())
return errors.Errorf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
return errors.Errorf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
log.Printf("complete listing /test/one/test2")
return nil
@@ -630,8 +621,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -663,8 +653,7 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
func TestInternalCacheWrites(t *testing.T) {
id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -681,9 +670,11 @@ func TestInternalCacheWrites(t *testing.T) {
}
func TestInternalMaxChunkSizeRespected(t *testing.T) {
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -718,8 +709,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
func TestInternalExpiredEntriesRemoved(t *testing.T) {
id := fmt.Sprintf("tieer%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -756,9 +746,7 @@ func TestInternalBug2117(t *testing.T) {
vfsflags.Opt.DirCacheTime = time.Second * 10
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
if runInstance.rootIsCrypt {
t.Skipf("skipping crypt")
@@ -834,9 +822,9 @@ func newRun() *run {
}
if uploadDir == "" {
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
if err != nil {
log.Fatalf("Failed to create temp dir: %v", err)
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
}
} else {
r.tmpUploadDir = uploadDir
@@ -859,7 +847,7 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
return enc
}
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
remoteExists := false
for _, s := range config.FileSections() {
@@ -919,9 +907,9 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
}
runInstance.rootIsCrypt = rootIsCrypt
runInstance.dbPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
runInstance.chunkPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
runInstance.vfsCachePath = filepath.Join(config.CacheDir, "vfs", remote)
runInstance.dbPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote+".db")
runInstance.chunkPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote)
runInstance.vfsCachePath = filepath.Join(config.GetCacheDir(), "vfs", remote)
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
@@ -952,10 +940,15 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
t.Cleanup(func() {
runInstance.cleanupFs(t, f)
})
return f, boltDb
}
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
err := f.Features().Purge(context.Background(), "")
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
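The hunks above move teardown out of the individual tests: newCacheFs now registers cleanup via t.Cleanup, so every "defer runInstance.cleanupFs(...)" line disappears from the test bodies. A minimal sketch of the pattern with hypothetical names:

    package cachetest

    import (
        "os"
        "testing"
    )

    // newTempStore creates a per-test resource and registers its own teardown
    // with t.Cleanup, so callers no longer need a defer of their own.
    func newTempStore(t *testing.T) string {
        t.Helper()
        dir, err := os.MkdirTemp("", "cache-test")
        if err != nil {
            t.Fatalf("creating temp dir: %v", err)
        }
        t.Cleanup(func() { _ = os.RemoveAll(dir) })
        return dir
    }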
@@ -977,7 +970,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
chunk := int64(1024)
cnt := size / chunk
left := size % chunk
f, err := ioutil.TempFile("", "rclonecache-tempfile")
f, err := os.CreateTemp("", "rclonecache-tempfile")
require.NoError(t, err)
for i := 0; i < int(cnt); i++ {
@@ -1055,7 +1048,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
if !noLengthCheck && size != int64(len(checkSample)) {
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
return checkSample, fmt.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
}
return checkSample, nil
}
@@ -1250,7 +1243,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
case state = <-buCh:
// continue
case <-time.After(maxDuration):
waitCh <- errors.Errorf("Timed out waiting for background upload: %v", remote)
waitCh <- fmt.Errorf("Timed out waiting for background upload: %v", remote)
return
}
checkRemote := state.Remote
@@ -1267,7 +1260,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
return
}
}
waitCh <- errors.Errorf("Too many attempts to wait for the background upload: %v", remote)
waitCh <- fmt.Errorf("Too many attempts to wait for the background upload: %v", remote)
}()
return waitCh
}

View File

@@ -1,7 +1,7 @@
// Test Cache filesystem interface
// +build !plan9,!js
// +build !race
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test
@@ -19,7 +19,7 @@ func TestIntegration(t *testing.T) {
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})
}

View File

@@ -1,6 +1,7 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
package cache
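Each of these files gains the Go 1.17 //go:build constraint while keeping the legacy // +build comment for older toolchains; in the legacy form a comma means AND, which is why !plan9,!js,!race corresponds to !plan9 && !js && !race. A minimal illustration of the paired form (gofmt on Go 1.17+ keeps the two lines in sync automatically):

    // The file is excluded on plan9, on js, and in race-detector builds.

    //go:build !plan9 && !js && !race
    // +build !plan9,!js,!race

    package cache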

View File

@@ -1,5 +1,5 @@
// +build !plan9,!js
// +build !race
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test
@@ -21,10 +21,8 @@ import (
func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
runInstance.newCacheFs(t, remoteName, id, false, true,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
@@ -63,9 +61,7 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
@@ -73,19 +69,15 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err)
@@ -119,10 +111,8 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err)
@@ -162,10 +152,8 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test")
require.NoError(t, err)
@@ -213,9 +201,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
@@ -343,9 +329,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()

View File

@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

View File

@@ -1,9 +1,11 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
import (
"context"
"errors"
"fmt"
"io"
"path"
@@ -12,7 +14,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
)
@@ -242,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
return nil, errors.Errorf("chunk not found %v", chunkStart)
return nil, fmt.Errorf("chunk not found %v", chunkStart)
}
// first chunk will be aligned with the start
@@ -322,7 +323,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
r.offset = r.cachedObject.Size() + offset
default:
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
err = fmt.Errorf("cache: unimplemented seek whence %v", whence)
}
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
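The last line above rounds the seek offset down to a chunk boundary. As a worked example (assuming the cache backend's default chunk size of 5 MiB), an offset of 12 MiB gives chunkStart = 12 MiB - (12 MiB mod 5 MiB) = 10 MiB, i.e. the read resumes at the start of the third chunk.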

View File

@@ -1,15 +1,16 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
import (
"context"
"fmt"
"io"
"path"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
@@ -177,10 +178,14 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
}
if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
if err != nil {
err = fmt.Errorf("in parent fs %v: %w", o.ParentFs, err)
}
} else {
liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
if err != nil {
err = fmt.Errorf("in cache fs %v: %w", o.CacheFs.Fs, err)
}
}
if err != nil {
fs.Errorf(o, "error refreshing object in : %v", err)
@@ -252,7 +257,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return errors.Errorf("%v is currently uploading, can't update", o)
return fmt.Errorf("%v is currently uploading, can't update", o)
}
}
fs.Debugf(o, "updating object contents with size %v", src.Size())
@@ -291,7 +296,7 @@ func (o *Object) Remove(ctx context.Context) error {
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return errors.Errorf("%v is currently uploading, can't delete", o)
return fmt.Errorf("%v is currently uploading, can't delete", o)
}
}
err := o.Object.Remove(ctx)

View File

@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
@@ -7,7 +8,7 @@ import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/url"
"strings"
@@ -166,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
continue
}
var data []byte
data, err = ioutil.ReadAll(resp.Body)
data, err = io.ReadAll(resp.Body)
if err != nil {
continue
}
@@ -212,7 +213,7 @@ func (p *plexConnector) authenticate() error {
var data map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil {
return fmt.Errorf("failed to obtain token: %v", err)
return fmt.Errorf("failed to obtain token: %w", err)
}
tokenGen, ok := get(data, "user", "authToken")
if !ok {

View File

@@ -1,14 +1,15 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
import (
"fmt"
"strconv"
"strings"
"time"
cache "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -52,7 +53,7 @@ func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
return data, nil
}
return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
return nil, fmt.Errorf("couldn't get cached object data at offset %v", offset)
}
// AddChunk adds a new chunk of a cached object
@@ -75,10 +76,7 @@ func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {
// CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
func (m *Memory) CleanChunksByNeed(offset int64) {
var items map[string]cache.Item
items = m.db.Items()
for key := range items {
for key := range m.db.Items() {
sepIdx := strings.LastIndex(key, "-")
keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
if err != nil {

View File

@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
@@ -8,7 +9,6 @@ import (
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
@@ -16,7 +16,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
@@ -119,11 +118,11 @@ func (b *Persistent) connect() error {
err = os.MkdirAll(b.dataPath, os.ModePerm)
if err != nil {
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err)
}
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
if err != nil {
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err)
}
if b.features.PurgeDb {
b.Purge()
@@ -175,7 +174,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(remote, false, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", remote)
return fmt.Errorf("couldn't open bucket (%v)", remote)
}
data := bucket.Get([]byte("."))
@@ -183,7 +182,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
return json.Unmarshal(data, cd)
}
return errors.Errorf("%v not found", remote)
return fmt.Errorf("%v not found", remote)
})
return cd, err
@@ -208,7 +207,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
}
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
}
for _, cachedDir := range cachedDirs {
@@ -225,7 +224,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
encoded, err := json.Marshal(cachedDir)
if err != nil {
return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
}
err = b.Put([]byte("."), encoded)
if err != nil {
@@ -243,17 +242,17 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedDir.abs(), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs())
}
val := bucket.Get([]byte("."))
if val != nil {
err := json.Unmarshal(val, cachedDir)
if err != nil {
return errors.Errorf("error during unmarshalling obj: %v", err)
return fmt.Errorf("error during unmarshalling obj: %w", err)
}
} else {
return errors.Errorf("missing cached dir: %v", cachedDir)
return fmt.Errorf("missing cached dir: %v", cachedDir)
}
c := bucket.Cursor()
@@ -268,7 +267,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
// we try to find a cached meta for the dir
currentBucket := c.Bucket().Bucket(k)
if currentBucket == nil {
return errors.Errorf("couldn't open bucket (%v)", string(k))
return fmt.Errorf("couldn't open bucket (%v)", string(k))
}
metaKey := currentBucket.Get([]byte("."))
@@ -317,7 +316,7 @@ func (b *Persistent) RemoveDir(fp string) error {
err = b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", fp)
return fmt.Errorf("couldn't open bucket (%v)", fp)
}
// delete the cached dir
err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
@@ -377,13 +376,13 @@ func (b *Persistent) GetObject(cachedObject *Object) (err error) {
return b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
}
val := bucket.Get([]byte(cachedObject.Name))
if val != nil {
return json.Unmarshal(val, cachedObject)
}
return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
return fmt.Errorf("couldn't find object (%v)", cachedObject.Name)
})
}
@@ -392,16 +391,16 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, true, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject)
}
// cache Object Info
encoded, err := json.Marshal(cachedObject)
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
}
err = bucket.Put([]byte(cachedObject.Name), encoded)
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
}
return nil
})
@@ -413,7 +412,7 @@ func (b *Persistent) RemoveObject(fp string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
}
err := bucket.Delete([]byte(cleanPath(objName)))
if err != nil {
@@ -445,7 +444,7 @@ func (b *Persistent) HasEntry(remote string) bool {
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(dir, false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", remote)
return fmt.Errorf("couldn't open parent bucket for %v", remote)
}
if f := bucket.Bucket([]byte(name)); f != nil {
return nil
@@ -454,12 +453,9 @@ func (b *Persistent) HasEntry(remote string) bool {
return nil
}
return errors.Errorf("couldn't find object (%v)", remote)
return fmt.Errorf("couldn't find object (%v)", remote)
})
if err == nil {
return true
}
return false
return err == nil
}
// HasChunk confirms the existence of a single chunk of an object
@@ -476,7 +472,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
var data []byte
fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
data, err := ioutil.ReadFile(fp)
data, err := os.ReadFile(fp)
if err != nil {
return nil, err
}
@@ -489,7 +485,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)
filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
err := ioutil.WriteFile(filePath, data, os.ModePerm)
err := os.WriteFile(filePath, data, os.ModePerm)
if err != nil {
return err
}
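Throughout these files the deprecated io/ioutil helpers are swapped for their Go 1.16 standard-library homes. A quick, self-contained reference sketch of the one-to-one replacements used above (paths and names are illustrative):

    package main

    import (
        "fmt"
        "io"
        "os"
        "path/filepath"
        "strings"
    )

    func main() {
        // ioutil.TempDir -> os.MkdirTemp
        dir, _ := os.MkdirTemp("", "demo")
        defer os.RemoveAll(dir)

        // ioutil.WriteFile -> os.WriteFile, ioutil.ReadFile -> os.ReadFile
        p := filepath.Join(dir, "demo.txt")
        _ = os.WriteFile(p, []byte("hello"), 0o644)
        data, _ := os.ReadFile(p)
        fmt.Println(string(data))

        // ioutil.ReadAll -> io.ReadAll
        body, _ := io.ReadAll(strings.NewReader("stream"))
        fmt.Println(string(body))

        // ioutil.TempFile -> os.CreateTemp
        f, _ := os.CreateTemp(dir, "demo-*.bin")
        f.Close()

        // ioutil.ReadDir -> os.ReadDir (returns []os.DirEntry, not []os.FileInfo)
        entries, _ := os.ReadDir(dir)
        fmt.Println(len(entries))
    }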
@@ -554,7 +550,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
err := b.db.Update(func(tx *bolt.Tx) error {
dataTsBucket := tx.Bucket([]byte(DataTsBucket))
if dataTsBucket == nil {
return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
return fmt.Errorf("couldn't open (%v) bucket", DataTsBucket)
}
// iterate through ts
c := dataTsBucket.Cursor()
@@ -732,7 +728,7 @@ func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
return nil
}
}
return errors.Errorf("not found %v-%v", path, offset)
return fmt.Errorf("not found %v-%v", path, offset)
})
return t, err
@@ -772,7 +768,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
tempObj := &tempUploadInfo{
DestPath: destPath,
@@ -783,11 +779,11 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
// cache Object Info
encoded, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
}
err = bucket.Put([]byte(destPath), encoded)
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
return nil
@@ -802,7 +798,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
err = b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
c := bucket.Cursor()
@@ -835,7 +831,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
return nil
}
return errors.Errorf("no pending upload found")
return fmt.Errorf("no pending upload found")
})
return destPath, err
@@ -846,14 +842,14 @@ func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(tempBucket))
if bucket == nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return errors.Errorf("pending upload (%v) not found %v", remote, err)
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
started = tempObj.Started
@@ -868,7 +864,7 @@ func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, e
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(tempBucket))
if bucket == nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
c := bucket.Cursor()
@@ -898,22 +894,22 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return errors.Errorf("pending upload (%v) not found %v", remote, err)
return fmt.Errorf("pending upload (%v) not found: %w", remote, err)
}
tempObj.Started = false
v2, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated: %w", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated: %w", err)
}
return nil
})
@@ -926,7 +922,7 @@ func (b *Persistent) removePendingUpload(remote string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
return bucket.Delete([]byte(remote))
})
@@ -941,17 +937,17 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return errors.Errorf("pending upload (%v) not found %v", remote, err)
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
if tempObj.Started {
return errors.Errorf("pending upload already started %v", remote)
return fmt.Errorf("pending upload already started %v", remote)
}
err = fn(tempObj)
if err != nil {
@@ -969,11 +965,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
}
v2, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated: %w", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated: %w", err)
}
return nil
@@ -1014,11 +1010,11 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
// cache Object Info
encoded, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
}
err = bucket.Put([]byte(destPath), encoded)
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
}

View File

@@ -8,10 +8,10 @@ import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
gohash "hash"
"io"
"io/ioutil"
"math/rand"
"path"
"regexp"
@@ -21,7 +21,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -32,7 +31,6 @@ import (
"github.com/rclone/rclone/fs/operations"
)
//
// Chunker's composite files have one or more chunks
// and optional metadata object. If it's present,
// meta object is named after the original file.
@@ -65,7 +63,7 @@ import (
// length of 13 decimals it makes a 7-digit base-36 number.
//
// When transactions is set to the norename style, data chunks will
// keep their temporary chunk names (with the transacion identifier
// keep their temporary chunk names (with the transaction identifier
// suffix). To distinguish them from temporary chunks, the txn field
// of the metadata file is set to match the transaction identifier of
// the data chunks.
@@ -79,7 +77,6 @@ import (
// Metadata format v1 does not define any control chunk types,
// they are currently ignored aka reserved.
// In future they can be used to implement resumable uploads etc.
//
const (
ctrlTypeRegStr = `[a-z][a-z0-9]{2,6}`
tempSuffixFormat = `_%04s`
@@ -150,12 +147,13 @@ func init() {
Name: "remote",
Required: true,
Help: `Remote to chunk/unchunk.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or maybe "myremote:" (not recommended).`,
}, {
Name: "chunk_size",
Advanced: false,
Default: fs.SizeSuffix(2147483648), // 2GB
Default: fs.SizeSuffix(2147483648), // 2 GiB
Help: `Files larger than chunk size will be split in chunks.`,
}, {
Name: "name_format",
@@ -163,6 +161,7 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
Hide: fs.OptionHideCommandLine,
Default: `*.rclone_chunk.###`,
Help: `String format of chunk file names.
The two placeholders are: base file name (*) and chunk number (#...).
There must be one and only one asterisk and one or more consecutive hash characters.
If chunk number has less digits than the number of hashes, it is left-padded by zeros.
@@ -174,48 +173,57 @@ Possible chunk files are ignored if their name does not match given format.`,
Hide: fs.OptionHideCommandLine,
Default: 1,
Help: `Minimum valid chunk number. Usually 0 or 1.
By default chunk numbers start from 1.`,
}, {
Name: "meta_format",
Advanced: true,
Hide: fs.OptionHideCommandLine,
Default: "simplejson",
Help: `Format of the metadata object or "none". By default "simplejson".
Help: `Format of the metadata object or "none".
By default "simplejson".
Metadata is a small JSON file named after the composite file.`,
Examples: []fs.OptionExample{{
Value: "none",
Help: `Do not use metadata files at all. Requires hash type "none".`,
Help: `Do not use metadata files at all.
Requires hash type "none".`,
}, {
Value: "simplejson",
Help: `Simple JSON supports hash sums and chunk validation.
It has the following fields: ver, size, nchunks, md5, sha1.`,
}},
}, {
Name: "hash_type",
Advanced: false,
Default: "md5",
Help: `Choose how chunker handles hash sums. All modes but "none" require metadata.`,
Help: `Choose how chunker handles hash sums.
All modes but "none" require metadata.`,
Examples: []fs.OptionExample{{
Value: "none",
Help: `Pass any hash supported by wrapped remote for non-chunked files, return nothing otherwise`,
Help: `Pass any hash supported by wrapped remote for non-chunked files.
Return nothing otherwise.`,
}, {
Value: "md5",
Help: `MD5 for composite files`,
Help: `MD5 for composite files.`,
}, {
Value: "sha1",
Help: `SHA1 for composite files`,
Help: `SHA1 for composite files.`,
}, {
Value: "md5all",
Help: `MD5 for all files`,
Help: `MD5 for all files.`,
}, {
Value: "sha1all",
Help: `SHA1 for all files`,
Help: `SHA1 for all files.`,
}, {
Value: "md5quick",
Help: `Copying a file to chunker will request MD5 from the source falling back to SHA1 if unsupported`,
Help: `Copying a file to chunker will request MD5 from the source.
Falling back to SHA1 if unsupported.`,
}, {
Value: "sha1quick",
Help: `Similar to "md5quick" but prefers SHA1 over MD5`,
Help: `Similar to "md5quick" but prefers SHA1 over MD5.`,
}},
}, {
Name: "fail_hard",
@@ -279,13 +287,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
baseName, basePath, err := fspath.SplitFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
}
// Look for a file first
remotePath := fspath.JoinRootPath(basePath, rpath)
baseFs, err := cache.Get(ctx, baseName+remotePath)
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", baseName+remotePath, err)
}
if !operations.CanServerSideMove(baseFs) {
return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy")
@@ -375,7 +383,7 @@ type Fs struct {
// configure must be called only from NewFs or by unit tests.
func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error {
if err := f.setChunkNameFormat(nameFormat); err != nil {
return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
return fmt.Errorf("invalid name format '%s': %w", nameFormat, err)
}
if err := f.setMetaFormat(metaFormat); err != nil {
return err
@@ -432,10 +440,10 @@ func (f *Fs) setHashType(hashType string) error {
f.hashFallback = true
case "md5all":
f.useMD5 = true
f.hashAll = !f.base.Hashes().Contains(hash.MD5)
f.hashAll = !f.base.Hashes().Contains(hash.MD5) || f.base.Features().SlowHash
case "sha1all":
f.useSHA1 = true
f.hashAll = !f.base.Hashes().Contains(hash.SHA1)
f.hashAll = !f.base.Hashes().Contains(hash.SHA1) || f.base.Features().SlowHash
default:
return fmt.Errorf("unsupported hash type '%s'", hashType)
}
@@ -504,7 +512,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
strRegex := regexp.QuoteMeta(pattern)
strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1)
strRegex = strings.ReplaceAll(strRegex, "\\*", mainNameRegStr)
strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
f.nameRegexp = regexp.MustCompile(strRegex)
@@ -513,7 +521,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
if numDigits > 1 {
fmtDigits = fmt.Sprintf("%%0%dd", numDigits)
}
strFmt := strings.Replace(pattern, "%", "%%", -1)
strFmt := strings.ReplaceAll(pattern, "%", "%%")
strFmt = strings.Replace(strFmt, "*", "%s", 1)
f.dataNameFmt = reHashes.ReplaceAllLiteralString(strFmt, fmtDigits)
f.ctrlNameFmt = reHashes.ReplaceAllLiteralString(strFmt, "_%s")
@@ -531,7 +539,6 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
//
// xactID is a transaction identifier. Empty xactID denotes active chunk,
// otherwise temporary chunk name is produced.
//
func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
dir, parentName := path.Split(filePath)
var name, tempSuffix string
@@ -697,7 +704,6 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
// directory together with dead chunks.
// In future a flag named like `--chunker-list-hidden` may be added to
// rclone that will tell List to reveal hidden chunks.
//
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, err = f.base.List(ctx, dir)
if err != nil {
@@ -812,7 +818,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
tempEntries = append(tempEntries, wrapDir)
default:
if f.opt.FailHard {
return nil, fmt.Errorf("Unknown object type %T", entry)
return nil, fmt.Errorf("unknown object type %T", entry)
}
fs.Debugf(f, "unknown object type %T", entry)
}
@@ -857,7 +863,6 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
// Note that chunker prefers analyzing file names rather than reading
// the content of meta object assuming that directory scans are fast
// but opening even a small file can be slow on some backends.
//
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.scanObject(ctx, remote, false)
}
@@ -867,7 +872,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// ignores non-chunked objects and skips chunk size checks.
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
if err := f.forbidChunk(false, remote); err != nil {
return nil, errors.Wrap(err, "can't access")
return nil, fmt.Errorf("can't access: %w", err)
}
var (
@@ -916,7 +921,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
case fs.ErrorDirNotFound:
entries = nil
default:
return nil, errors.Wrap(err, "can't detect composite file")
return nil, fmt.Errorf("can't detect composite file: %w", err)
}
if f.useNoRename {
@@ -1032,7 +1037,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
if err != nil {
return err
}
metadata, err := ioutil.ReadAll(reader)
metadata, err := io.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return err
@@ -1056,7 +1061,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
case ErrMetaTooBig, ErrMetaUnknown:
return err // return these errors unwrapped for unit tests
default:
return errors.Wrap(err, "invalid metadata")
return fmt.Errorf("invalid metadata: %w", err)
}
if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
return errors.New("metadata doesn't match file size")
@@ -1073,7 +1078,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
// readXactID returns the transaction ID stored in the passed metadata object
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// if xactID has already been read and cahced return it now
// if xactID has already been read and cached return it now
if o.xIDCached {
return o.xactID, nil
}
@@ -1091,7 +1096,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
if err != nil {
return "", err
}
data, err := ioutil.ReadAll(reader)
data, err := io.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return "", err
@@ -1099,7 +1104,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
switch o.f.opt.MetaFormat {
case "simplejson":
if data != nil && len(data) > maxMetadataSizeWritten {
if len(data) > maxMetadataSizeWritten {
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
}
var metadata metaSimpleJSON
@@ -1121,7 +1126,7 @@ func (f *Fs) put(
// Perform consistency checks
if err := f.forbidChunk(src, remote); err != nil {
return nil, errors.Wrap(err, action+" refused")
return nil, fmt.Errorf("%s refused: %w", action, err)
}
if target == nil {
// Get target object with a quick directory scan
@@ -1135,7 +1140,7 @@ func (f *Fs) put(
obj := target.(*Object)
if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
// refuse to update a file of unsupported format
return nil, errors.Wrap(err, "refusing to "+action)
return nil, fmt.Errorf("refusing to %s: %w", action, err)
}
}
@@ -1214,7 +1219,7 @@ func (f *Fs) put(
// and skips the "EOF" read. Hence, switch to next limit here.
if !(c.chunkLimit == 0 || c.chunkLimit == c.chunkSize || c.sizeTotal == -1 || c.done) {
silentlyRemove(ctx, chunk)
return nil, fmt.Errorf("Destination ignored %d data bytes", c.chunkLimit)
return nil, fmt.Errorf("destination ignored %d data bytes", c.chunkLimit)
}
c.chunkLimit = c.chunkSize
@@ -1223,7 +1228,7 @@ func (f *Fs) put(
// Validate uploaded size
if c.sizeTotal != -1 && c.readCount != c.sizeTotal {
return nil, fmt.Errorf("Incorrect upload size %d != %d", c.readCount, c.sizeTotal)
return nil, fmt.Errorf("incorrect upload size %d != %d", c.readCount, c.sizeTotal)
}
// Check for input that looks like valid metadata
@@ -1260,7 +1265,7 @@ func (f *Fs) put(
sizeTotal += chunk.Size()
}
if sizeTotal != c.readCount {
return nil, fmt.Errorf("Incorrect chunks size %d != %d", sizeTotal, c.readCount)
return nil, fmt.Errorf("incorrect chunks size %d != %d", sizeTotal, c.readCount)
}
// If previous object was chunked, remove its chunks
@@ -1448,7 +1453,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
c.accountBytes(size)
return nil
}
const bufLen = 1048576 // 1MB
const bufLen = 1048576 // 1 MiB
buf := make([]byte, bufLen)
for size > 0 {
n := size
@@ -1553,7 +1558,7 @@ func (f *Fs) Hashes() hash.Set {
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
if err := f.forbidChunk(dir, dir); err != nil {
return errors.Wrap(err, "can't mkdir")
return fmt.Errorf("can't mkdir: %w", err)
}
return f.base.Mkdir(ctx, dir)
}
@@ -1575,7 +1580,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// This command will chain to `purge` from wrapped remote.
// As a result it removes not only composite chunker files with their
// active chunks but also all hidden temporary chunks in the directory.
//
func (f *Fs) Purge(ctx context.Context, dir string) error {
do := f.base.Features().Purge
if do == nil {
@@ -1617,12 +1621,11 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Unsupported control chunks will get re-picked by a more recent
// rclone version with unexpected results. This can be helped by
// the `delete hidden` flag above or at least the user has been warned.
//
func (o *Object) Remove(ctx context.Context) (err error) {
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
// operations.Move can still call Remove if chunker's Move refuses
// to corrupt file in hard mode. Hence, refuse to Remove, too.
return errors.Wrap(err, "refuse to corrupt")
return fmt.Errorf("refuse to corrupt: %w", err)
}
if err := o.readMetadata(ctx); err == ErrMetaUnknown {
// Proceed but warn user that unexpected things can happen.
@@ -1650,12 +1653,12 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// copyOrMove implements copy or move
func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMoveFn, md5, sha1, opName string) (fs.Object, error) {
if err := f.forbidChunk(o, remote); err != nil {
return nil, errors.Wrapf(err, "can't %s", opName)
return nil, fmt.Errorf("can't %s: %w", opName, err)
}
if err := o.readMetadata(ctx); err != nil {
// Refuse to copy/move composite files with invalid or future
// metadata format which might involve unsupported chunk types.
return nil, errors.Wrapf(err, "can't %s this file", opName)
return nil, fmt.Errorf("can't %s this file: %w", opName, err)
}
if !o.isComposite() {
fs.Debugf(o, "%s non-chunked object...", opName)
@@ -1793,9 +1796,9 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1814,9 +1817,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1884,7 +1887,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.base.Features().CleanUp
if do == nil {
return errors.New("can't CleanUp")
return errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -1893,7 +1896,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.base.Features().About
if do == nil {
return nil, errors.New("About not supported")
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -2114,7 +2117,6 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
// file, then tries to read it from metadata. This in theory
// handles the unusual case when a small file has been tampered
// on the level of wrapped remote but chunker is unaware of that.
//
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
if err := o.readMetadata(ctx); err != nil {
return "", err // valid metadata is required to get hash, abort
@@ -2152,7 +2154,7 @@ func (o *Object) UnWrap() fs.Object {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if err := o.readMetadata(ctx); err != nil {
// refuse to open unsupported format
return nil, errors.Wrap(err, "can't open")
return nil, fmt.Errorf("can't open: %w", err)
}
if !o.isComposite() {
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
@@ -2403,7 +2405,6 @@ type metaSimpleJSON struct {
// - for files larger than chunk size
// - if file contents can be mistaken as meta object
// - if consistent hashing is On but wrapped remote can't provide given hash
//
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
version := metadataVersion
if xactID == "" && version == 2 {
@@ -2436,11 +2437,10 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
// New format will have a higher version number and cannot be correctly
// handled by current implementation.
// The version check below will then explicitly ask user to upgrade rclone.
//
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
// Be strict about JSON format
// to reduce possibility that a random small file resembles metadata.
if data != nil && len(data) > maxMetadataSizeWritten {
if len(data) > maxMetadataSizeWritten {
return nil, false, ErrMetaTooBig
}
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
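The strictness here is deliberate: only a small, well-formed JSON object is ever accepted as chunker metadata, and anything carrying a newer version than the running build is rejected rather than guessed at. A minimal sketch of that version gate; the "ver" field tag is an assumption for illustration, not copied from metaSimpleJSON (needs encoding/json and errors):

// metaProbe is illustrative only; the real field set lives in metaSimpleJSON.
type metaProbe struct {
	Ver int `json:"ver"`
}

func checkMetaVersion(data []byte, currentVersion int) error {
	var probe metaProbe
	if err := json.Unmarshal(data, &probe); err != nil {
		return err // not chunker metadata at all
	}
	if probe.Ver > currentVersion {
		return errors.New("metadata produced by a newer rclone, please upgrade")
	}
	return nil
}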


@@ -5,13 +5,15 @@ import (
"context"
"flag"
"fmt"
"io/ioutil"
"io"
"path"
"regexp"
"strings"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
@@ -33,11 +35,35 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
Size: int64(kilobytes) * int64(fs.KibiByte),
Size: int64(kilobytes) * int64(fs.Kibi),
})
})
}
type settings map[string]interface{}
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
configMap := configmap.Simple{}
for key, val := range opts {
configMap[key] = fmt.Sprintf("%v", val)
}
rpath := fspath.JoinRootPath(f.Root(), path)
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), rpath)
fixFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)
return fixFs
}
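deriveFs re-creates the remote under test with option overrides by rendering them into a connection-string style remote and handing it back to fs.NewFs. With the md5all settings used further down, the string passed to fs.NewFs would look roughly like the sketch below; the remote name and path are hypothetical and the exact rendering of configMap.String() may differ slightly:

// Hypothetical values, shown only to make the shape of the derived remote concrete.
remote := "TestChunker,chunk_size='1P',hash_type='md5all':some/root/md5all"
fixFs, err := fs.NewFs(ctx, remote)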
var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: mtime1}
obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}
// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
saveOpt := f.opt
@@ -387,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
if r == nil {
return
}
data, err := ioutil.ReadAll(r)
data, err := io.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
@@ -414,7 +440,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
checkSmallFile := func(name, contents string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
assert.NotNil(t, put)
checkSmallFileInternals(put)
checkContents(put, contents)
@@ -463,7 +489,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
newFile := func(name string) fs.Object {
item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj
}
@@ -512,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
assert.NoError(t, err)
var chunkContents []byte
assert.NotPanics(t, func() {
chunkContents, err = ioutil.ReadAll(r)
chunkContents, err = io.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
@@ -547,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
r, err = willyChunk.Open(ctx)
assert.NoError(t, err)
assert.NotPanics(t, func() {
_, err = ioutil.ReadAll(r)
_, err = io.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
@@ -573,7 +599,7 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
filename = path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
txnID = chunkObj.xactID
@@ -617,22 +643,13 @@ func testMetadataInput(t *testing.T, f *Fs) {
}()
f.opt.FailHard = false
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}
runSubtest := func(contents, name string) {
description := fmt.Sprintf("file with %s metadata", name)
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)
part := testPutFile(ctx, t, f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = testPutFile(ctx, t, f, filename, contents, "upload "+description, false)
obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err, "access "+description)
@@ -655,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil {
data, err := ioutil.ReadAll(r)
data, err := io.ReadAll(r)
assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close()
@@ -678,7 +695,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
// Test that chunker refuses to change on objects with future/unknown metadata
func testFutureProof(t *testing.T, f *Fs) {
if f.opt.MetaFormat == "none" {
if !f.useMeta {
t.Skip("this test requires metadata support")
}
@@ -699,7 +716,7 @@ func testFutureProof(t *testing.T, f *Fs) {
name = f.makeChunkName(name, part-1, "", "")
}
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
assert.NotNil(t, obj, msg)
}
@@ -741,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
assert.Error(t, err)
// Rcat must fail
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime)
in := io.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
assert.Nil(t, robj)
assert.NotNil(t, err)
if err != nil {
@@ -773,7 +790,7 @@ func testBackwardsCompatibility(t *testing.T, f *Fs) {
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
}
@@ -827,7 +844,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: "movefile", ModTime: modTime}
contents := "abcdef"
_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
@@ -837,13 +854,51 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
r, err := dstFile.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
data, err := ioutil.ReadAll(r)
data, err := io.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
_ = operations.Purge(ctx, f.base, dir)
}
// Test that md5all creates metadata even for small files
func testMD5AllSlow(t *testing.T, f *Fs) {
ctx := context.Background()
fsResult := deriveFs(ctx, t, f, "md5all", settings{
"chunk_size": "1P",
"name_format": "*.#",
"hash_type": "md5all",
"transactions": "rename",
"meta_format": "simplejson",
})
chunkFs, ok := fsResult.(*Fs)
require.True(t, ok, "fs must be a chunker remote")
baseFs := chunkFs.base
if !baseFs.Features().SlowHash {
t.Skipf("this test needs a base fs with slow hash, e.g. local")
}
assert.True(t, chunkFs.useMD5, "must use md5")
assert.True(t, chunkFs.hashAll, "must hash all files")
_ = testPutFile(ctx, t, chunkFs, "file", "-", "error", true)
obj, err := chunkFs.NewObject(ctx, "file")
require.NoError(t, err)
sum, err := obj.Hash(ctx, hash.MD5)
assert.NoError(t, err)
assert.Equal(t, "336d5ebc5436534e61d16e63ddfca327", sum)
list, err := baseFs.List(ctx, "")
require.NoError(t, err)
assert.Equal(t, 2, len(list))
_, err = baseFs.NewObject(ctx, "file")
assert.NoError(t, err, "metadata must be created")
_, err = baseFs.NewObject(ctx, "file.1")
assert.NoError(t, err, "first chunk must be created")
require.NoError(t, operations.Purge(ctx, baseFs, ""))
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
@@ -876,6 +931,9 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("ChunkerServerSideMove", func(t *testing.T) {
testChunkerServerSideMove(t, f)
})
t.Run("MD5AllSlow", func(t *testing.T) {
testMD5AllSlow(t, f)
})
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -35,6 +35,7 @@ func TestIntegration(t *testing.T) {
"MimeType",
"GetTier",
"SetTier",
"Metadata",
},
UnimplementableFsMethods: []string{
"PublicLink",
@@ -53,6 +54,7 @@ func TestIntegration(t *testing.T) {
{Name: name, Key: "type", Value: "chunker"},
{Name: name, Key: "remote", Value: tempDir},
}
opt.QuickTestOK = true
}
fstests.Run(t, &opt)
}

backend/combine/combine.go Normal file

@@ -0,0 +1,992 @@
// Package combine implements a backend to combine multiple remotes in a directory tree
package combine
/*
Have API to add/remove branches in the combine
*/
import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"golang.org/x/sync/errgroup"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "combine",
Description: "Combine several remotes into one",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "upstreams",
Help: `Upstreams for combining
These should be in the form
dir=remote:path dir2=remote2:path
Where before the = is specified the root directory and after is the remote to
put there.
Embedded spaces can be added using quotes
"dir=remote:path with space" "dir2=remote2:path with space"
`,
Required: true,
Default: fs.SpaceSepList(nil),
}},
}
fs.Register(fsi)
}
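For orientation, this is roughly how a combine remote is pointed at its upstreams; a minimal sketch assuming the on-the-fly backend syntax (already used by the :memory: upstreams in the tests below) accepts a quoted upstreams value, with "books" and "photos" as hypothetical directory names:

ctx := context.Background()
f, err := fs.NewFs(ctx, `:combine,upstreams="books=/mnt/books photos=/mnt/photos":`)
if err != nil {
	// handle error
}
// Listing the root yields one synthetic directory per upstream: books/ and photos/.
entries, err := f.List(ctx, "")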
// Options defines the configuration for this backend
type Options struct {
Upstreams fs.SpaceSepList `config:"upstreams"`
}
// Fs represents a combine of upstreams
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
root string // the path we are working on
hashSet hash.Set // common hashes
when time.Time // directory times
upstreams map[string]*upstream // map of upstreams
}
// adjustment stores the info to add a prefix to a path or chop characters off
type adjustment struct {
root string
rootSlash string
mountpoint string
mountpointSlash string
}
// newAdjustment makes a new path adjustment adjusting between mountpoint and root
//
// mountpoint is the point the upstream is mounted and root is the combine root
func newAdjustment(root, mountpoint string) (a adjustment) {
return adjustment{
root: root,
rootSlash: root + "/",
mountpoint: mountpoint,
mountpointSlash: mountpoint + "/",
}
}
var errNotUnderRoot = errors.New("file not under root")
// do makes the adjustment on s, mapping an upstream path into a combine path
func (a *adjustment) do(s string) (string, error) {
absPath := join(a.mountpoint, s)
if a.root == "" {
return absPath, nil
}
if absPath == a.root {
return "", nil
}
if !strings.HasPrefix(absPath, a.rootSlash) {
return "", errNotUnderRoot
}
return absPath[len(a.rootSlash):], nil
}
// undo makes the adjustment on s, mapping a combine path into an upstream path
func (a *adjustment) undo(s string) (string, error) {
absPath := join(a.root, s)
if absPath == a.mountpoint {
return "", nil
}
if !strings.HasPrefix(absPath, a.mountpointSlash) {
return "", errNotUnderRoot
}
return absPath[len(a.mountpointSlash):], nil
}
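Concretely, with the same values the unit tests further down use (root "mountpoint/path", mountpoint "mountpoint"), the two directions look like this:

a := newAdjustment("mountpoint/path", "mountpoint")
combinePath, _ := a.do("path/to/file.txt") // "to/file.txt": the upstream path seen through the combine root
upstreamPath, _ := a.undo(combinePath)     // "path/to/file.txt": back to the path on the upstream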
// upstream represents an upstream Fs
type upstream struct {
f fs.Fs
parent *Fs
dir string // directory the upstream is mounted
pathAdjustment adjustment // how to fiddle with the path
}
// Create an upstream from the directory it is mounted on and the remote
func (f *Fs) newUpstream(ctx context.Context, dir, remote string) (*upstream, error) {
uFs, err := cache.Get(ctx, remote)
if err == fs.ErrorIsFile {
return nil, fmt.Errorf("can't combine files yet, only directories %q: %w", remote, err)
}
if err != nil {
return nil, fmt.Errorf("failed to create upstream %q: %w", remote, err)
}
u := &upstream{
f: uFs,
parent: f,
dir: dir,
pathAdjustment: newAdjustment(f.root, dir),
}
cache.PinUntilFinalized(u.f, u)
return u, nil
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
// Parse config into Options struct
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Backward compatible to old config
if len(opt.Upstreams) == 0 {
return nil, errors.New("combine can't point to an empty upstream - check the value of the upstreams setting")
}
for _, u := range opt.Upstreams {
if strings.HasPrefix(u, name+":") {
return nil, errors.New("can't point combine remote at itself - check the value of the upstreams setting")
}
}
isDir := false
for strings.HasSuffix(root, "/") {
root = root[:len(root)-1]
isDir = true
}
f := &Fs{
name: name,
root: root,
opt: *opt,
upstreams: make(map[string]*upstream, len(opt.Upstreams)),
when: time.Now(),
}
g, gCtx := errgroup.WithContext(ctx)
var mu sync.Mutex
for _, upstream := range opt.Upstreams {
upstream := upstream
g.Go(func() (err error) {
equal := strings.IndexRune(upstream, '=')
if equal < 0 {
return fmt.Errorf("no \"=\" in upstream definition %q", upstream)
}
dir, remote := upstream[:equal], upstream[equal+1:]
if dir == "" {
return fmt.Errorf("empty dir in upstream definition %q", upstream)
}
if remote == "" {
return fmt.Errorf("empty remote in upstream definition %q", upstream)
}
if strings.ContainsRune(dir, '/') {
return fmt.Errorf("dirs can't contain / (yet): %q", dir)
}
u, err := f.newUpstream(gCtx, dir, remote)
if err != nil {
return err
}
mu.Lock()
if _, found := f.upstreams[dir]; found {
err = fmt.Errorf("duplicate directory name %q", dir)
} else {
f.upstreams[dir] = u
}
mu.Unlock()
return err
})
}
err = g.Wait()
if err != nil {
return nil, err
}
// check features
var features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f)
canMove := true
for _, u := range f.upstreams {
features = features.Mask(ctx, u.f) // Mask all upstream fs
if !operations.CanServerSideMove(u.f) {
canMove = false
}
}
// We can move if all remotes support Move or Copy
if canMove {
features.Move = f.Move
}
// Enable ListR when upstreams either support ListR or are local
// But not when all upstreams are local
if features.ListR == nil {
for _, u := range f.upstreams {
if u.f.Features().ListR != nil {
features.ListR = f.ListR
} else if !u.f.Features().IsLocal {
features.ListR = nil
break
}
}
}
// Enable Purge when any upstreams support it
if features.Purge == nil {
for _, u := range f.upstreams {
if u.f.Features().Purge != nil {
features.Purge = f.Purge
break
}
}
}
// Enable Shutdown when any upstreams support it
if features.Shutdown == nil {
for _, u := range f.upstreams {
if u.f.Features().Shutdown != nil {
features.Shutdown = f.Shutdown
break
}
}
}
// Enable DirCacheFlush when any upstreams support it
if features.DirCacheFlush == nil {
for _, u := range f.upstreams {
if u.f.Features().DirCacheFlush != nil {
features.DirCacheFlush = f.DirCacheFlush
break
}
}
}
// Enable ChangeNotify when any upstreams support it
if features.ChangeNotify == nil {
for _, u := range f.upstreams {
if u.f.Features().ChangeNotify != nil {
features.ChangeNotify = f.ChangeNotify
break
}
}
}
f.features = features
// Get common intersection of hashes
var hashSet hash.Set
var first = true
for _, u := range f.upstreams {
if first {
hashSet = u.f.Hashes()
first = false
} else {
hashSet = hashSet.Overlap(u.f.Hashes())
}
}
f.hashSet = hashSet
// Check to see if the root is actually a file
if f.root != "" && !isDir {
_, err := f.NewObject(ctx, "")
if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile || err == fs.ErrorIsDir {
// File doesn't exist or is a directory so return old f
return f, nil
}
return nil, err
}
// Check to see if the root path is actually an existing file
f.root = path.Dir(f.root)
if f.root == "." {
f.root = ""
}
// Adjust path adjustment to remove leaf
for _, u := range f.upstreams {
u.pathAdjustment = newAdjustment(f.root, u.dir)
}
return f, fs.ErrorIsFile
}
return f, nil
}
// Run a function over all the upstreams in parallel
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
g, gCtx := errgroup.WithContext(ctx)
for _, u := range f.upstreams {
u := u
g.Go(func() (err error) {
return fn(gCtx, u)
})
}
return g.Wait()
}
// join the elements together but unlike path.Join return empty string
func join(elem ...string) string {
result := path.Join(elem...)
if result == "." {
return ""
}
if len(result) > 0 && result[0] == '/' {
result = result[1:]
}
return result
}
// find the upstream for the remote passed in, returning the upstream and the adjusted path
func (f *Fs) findUpstream(remote string) (u *upstream, uRemote string, err error) {
// defer log.Trace(remote, "")("f=%v, uRemote=%q, err=%v", &u, &uRemote, &err)
for _, u := range f.upstreams {
uRemote, err = u.pathAdjustment.undo(remote)
if err == nil {
return u, uRemote, nil
}
}
return nil, "", fmt.Errorf("combine for remote %q: %w", remote, fs.ErrorDirNotFound)
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("combine root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// The root always exists
if f.root == "" && dir == "" {
return nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
return u.f.Rmdir(ctx, uRemote)
}
// Hashes returns the hash types supported by all upstreams
func (f *Fs) Hashes() hash.Set {
return f.hashSet
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// The root always exists
if f.root == "" && dir == "" {
return nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
return u.f.Mkdir(ctx, uRemote)
}
// purge the upstream or fallback to a slow way
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
if do := u.f.Features().Purge; do != nil {
err = do(ctx, dir)
} else {
err = operations.Purge(ctx, u.f, dir)
}
return err
}
// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
if f.root == "" && dir == "" {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
return u.purge(ctx, "")
})
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
return u.purge(ctx, uRemote)
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
dstU, dstRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := dstU.f.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
o, err := do(ctx, srcObj.Object, dstRemote)
if err != nil {
return nil, err
}
return dstU.newObject(o), nil
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
dstU, dstRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := dstU.f.Features().Move
useCopy := false
if do == nil {
do = dstU.f.Features().Copy
if do == nil {
return nil, fs.ErrorCantMove
}
useCopy = true
}
o, err := do(ctx, srcObj.Object, dstRemote)
if err != nil {
return nil, err
}
// If did Copy then remove the source object
if useCopy {
err = srcObj.Remove(ctx)
if err != nil {
return nil, err
}
}
return dstU.newObject(o), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
// defer log.Trace(f, "src=%v, srcRemote=%q, dstRemote=%q", src, srcRemote, dstRemote)("err=%v", &err)
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(src, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
dstU, dstURemote, err := f.findUpstream(dstRemote)
if err != nil {
return err
}
srcU, srcURemote, err := srcFs.findUpstream(srcRemote)
if err != nil {
return err
}
do := dstU.f.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
fs.Logf(dstU.f, "srcU.f=%v, srcURemote=%q, dstURemote=%q", srcU.f, srcURemote, dstURemote)
return do(ctx, srcU.f, srcURemote, dstURemote)
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
var uChans []chan time.Duration
for _, u := range f.upstreams {
u := u
if do := u.f.Features().ChangeNotify; do != nil {
ch := make(chan time.Duration)
uChans = append(uChans, ch)
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
newPath, err := u.pathAdjustment.do(path)
if err != nil {
fs.Logf(f, "ChangeNotify: unable to process %q: %s", path, err)
return
}
fs.Debugf(f, "ChangeNotify: path %q entryType %d", newPath, entryType)
notifyFunc(newPath, entryType)
}
do(ctx, wrappedNotifyFunc, ch)
}
}
go func() {
for i := range ch {
for _, c := range uChans {
c <- i
}
}
for _, c := range uChans {
close(c)
}
}()
}
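Seen from the caller's side, the contract in the comment above (the caller owns the interval channel, a zero duration pauses polling, closing the channel stops everything) comes out like this; a minimal sketch, not lifted from rclone's own callers:

pollInterval := make(chan time.Duration)
f.ChangeNotify(ctx, func(path string, entryType fs.EntryType) {
	fs.Infof(nil, "changed: %q (%v)", path, entryType)
}, pollInterval)
pollInterval <- time.Minute // start polling upstreams that need it every minute
pollInterval <- 0           // pause polling
close(pollInterval)         // stop polling and release resources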
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
ctx := context.Background()
_ = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().DirCacheFlush; do != nil {
do()
}
return nil
})
}
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
u, uRemote, err := f.findUpstream(srcPath)
if err != nil {
return nil, err
}
uSrc := fs.NewOverrideRemote(src, uRemote)
var o fs.Object
if stream {
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
} else {
o, err = u.f.Put(ctx, in, uSrc, options...)
}
if err != nil {
return nil, err
}
return u.newObject(o), nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, false, options...)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, true, options...)
default:
return nil, err
}
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
usage := &fs.Usage{
Total: new(int64),
Used: new(int64),
Trashed: new(int64),
Other: new(int64),
Free: new(int64),
Objects: new(int64),
}
for _, u := range f.upstreams {
doAbout := u.f.Features().About
if doAbout == nil {
continue
}
usg, err := doAbout(ctx)
if errors.Is(err, fs.ErrorDirNotFound) {
continue
}
if err != nil {
return nil, err
}
if usg.Total != nil && usage.Total != nil {
*usage.Total += *usg.Total
} else {
usage.Total = nil
}
if usg.Used != nil && usage.Used != nil {
*usage.Used += *usg.Used
} else {
usage.Used = nil
}
if usg.Trashed != nil && usage.Trashed != nil {
*usage.Trashed += *usg.Trashed
} else {
usage.Trashed = nil
}
if usg.Other != nil && usage.Other != nil {
*usage.Other += *usg.Other
} else {
usage.Other = nil
}
if usg.Free != nil && usage.Free != nil {
*usage.Free += *usg.Free
} else {
usage.Free = nil
}
if usg.Objects != nil && usage.Objects != nil {
*usage.Objects += *usg.Objects
} else {
usage.Objects = nil
}
}
return usage, nil
}
// Wraps entries for this upstream
func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.DirEntries, error) {
for i, entry := range entries {
switch x := entry.(type) {
case fs.Object:
entries[i] = u.newObject(x)
case fs.Directory:
newDir := fs.NewDirCopy(ctx, x)
newPath, err := u.pathAdjustment.do(newDir.Remote())
if err != nil {
return nil, err
}
newDir.SetRemote(newPath)
entries[i] = newDir
default:
return nil, fmt.Errorf("unknown entry type %T", entry)
}
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
if f.root == "" && dir == "" {
entries = make(fs.DirEntries, 0, len(f.upstreams))
for combineDir := range f.upstreams {
d := fs.NewDir(combineDir, f.when)
entries = append(entries, d)
}
return entries, nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return nil, err
}
entries, err = u.f.List(ctx, uRemote)
if err != nil {
return nil, err
}
return u.wrapEntries(ctx, entries)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
// defer log.Trace(f, "dir=%q, callback=%v", dir, callback)("err=%v", &err)
if f.root == "" && dir == "" {
rootEntries, err := f.List(ctx, "")
if err != nil {
return err
}
err = callback(rootEntries)
if err != nil {
return err
}
var mu sync.Mutex
syncCallback := func(entries fs.DirEntries) error {
mu.Lock()
defer mu.Unlock()
return callback(entries)
}
err = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
return f.ListR(ctx, u.dir, syncCallback)
})
if err != nil {
return err
}
return nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
wrapCallback := func(entries fs.DirEntries) error {
entries, err := u.wrapEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
}
if do := u.f.Features().ListR; do != nil {
err = do(ctx, uRemote, wrapCallback)
} else {
err = walk.ListR(ctx, u.f, uRemote, true, -1, walk.ListAll, wrapCallback)
}
if err == fs.ErrorDirNotFound {
err = nil
}
return err
}
// NewObject creates a new remote combine file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
if uRemote == "" || strings.HasSuffix(uRemote, "/") {
return nil, fs.ErrorIsDir
}
o, err := u.f.NewObject(ctx, uRemote)
if err != nil {
return nil, err
}
return u.newObject(o), nil
}
// Precision is the greatest Precision of all upstreams
func (f *Fs) Precision() time.Duration {
var greatestPrecision time.Duration
for _, u := range f.upstreams {
uPrecision := u.f.Precision()
if uPrecision > greatestPrecision {
greatestPrecision = uPrecision
}
}
return greatestPrecision
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().Shutdown; do != nil {
return do(ctx)
}
return nil
})
}
// Object describes a wrapped Object
//
// This is a wrapped Object which knows its path prefix
type Object struct {
fs.Object
u *upstream
}
func (u *upstream) newObject(o fs.Object) *Object {
return &Object{
Object: o,
u: u,
}
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.u.parent
}
// String returns the remote path
func (o *Object) String() string {
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
newPath, err := o.u.pathAdjustment.do(o.Object.String())
if err != nil {
fs.Errorf(o, "Bad object: %v", err)
return err.Error()
}
return newPath
}
// MimeType returns the content type of the Object if known
func (o *Object) MimeType(ctx context.Context) (mimeType string) {
if do, ok := o.Object.(fs.MimeTyper); ok {
mimeType = do.MimeType(ctx)
}
return mimeType
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
return o.Object
}
// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
do, ok := o.Object.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.Object.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
do, ok := o.Object.(fs.SetTierer)
if !ok {
return errors.New("underlying remote does not support SetTier")
}
return do.SetTier(tier)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil)
)


@@ -0,0 +1,94 @@
package combine
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestAdjustmentDo(t *testing.T) {
for _, test := range []struct {
root string
mountpoint string
in string
want string
wantErr error
}{
{
root: "",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "mountpoint/path/to/file.txt",
},
{
root: "mountpoint",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "wrongpath/to/file.txt",
want: "",
wantErr: errNotUnderRoot,
},
} {
what := fmt.Sprintf("%+v", test)
a := newAdjustment(test.root, test.mountpoint)
got, gotErr := a.do(test.in)
assert.Equal(t, test.wantErr, gotErr)
assert.Equal(t, test.want, got, what)
}
}
func TestAdjustmentUndo(t *testing.T) {
for _, test := range []struct {
root string
mountpoint string
in string
want string
wantErr error
}{
{
root: "",
mountpoint: "mountpoint",
in: "mountpoint/path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "to/file.txt",
want: "path/to/file.txt",
},
{
root: "wrongmountpoint/path",
mountpoint: "mountpoint",
in: "to/file.txt",
want: "",
wantErr: errNotUnderRoot,
},
} {
what := fmt.Sprintf("%+v", test)
a := newAdjustment(test.root, test.mountpoint)
got, gotErr := a.undo(test.in)
assert.Equal(t, test.wantErr, gotErr)
assert.Equal(t, test.want, got, what)
}
}


@@ -0,0 +1,81 @@
// Test Combine filesystem interface
package combine_test
import (
"testing"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/memory"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
func TestLocal(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := MakeTestDirs(t, 3)
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=" + dirs[2]
name := "TestCombineLocal"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
})
}
func TestMemory(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
upstreams := "dir1=:memory:dir1 dir2=:memory:dir2 dir3=:memory:dir3"
name := "TestCombineMemory"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
})
}
func TestMixed(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := MakeTestDirs(t, 2)
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=:memory:dir3"
name := "TestCombineMixed"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
})
}
// MakeTestDirs makes directories in /tmp for testing
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
for i := 1; i <= n; i++ {
dir := t.TempDir()
dirs = append(dirs, dir)
}
return dirs
}


@@ -10,9 +10,9 @@ import (
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
"strings"
@@ -21,7 +21,6 @@ import (
"github.com/buengese/sgzip"
"github.com/gabriel-vasile/mimetype"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunkedreader"
@@ -29,6 +28,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
)
@@ -36,7 +36,7 @@ import (
// Globals
const (
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
maxChunkSize = 8388608 // at 256KB and 8 MB.
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
bufferSize = 8388608
heuristicBytes = 1048576
@@ -53,7 +53,7 @@ const (
Gzip = 2
)
var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9-_]{11})$")
var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
// Register with Fs
func init() {
@@ -70,6 +70,9 @@ func init() {
Name: "compress",
Description: "Compress a remote",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "remote",
Help: "Remote to compress.",
@@ -83,23 +86,23 @@ func init() {
Name: "level",
Help: `GZIP compression level (-2 to 9).
Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compressiong at the cost of speed.. Going past 6
generally offers very little return.
Level -2 uses Huffmann encoding only. Only use if you now what you
are doing
Level 0 turns off compression.`,
Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.
Level -2 uses Huffman encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.`,
Default: sgzip.DefaultCompression,
Advanced: true,
}, {
Name: "ram_cache_limit",
Help: `Some remotes don't allow the upload of files with unknown size.
In this case the compressed file will need to be cached to determine
it's size.
Files smaller than this limit will be cached in RAM, file larger than
this limit will be cached on disk`,
In this case the compressed file will need to be cached to determine
its size.
Files smaller than this limit will be cached in RAM, files larger than
this limit will be cached on disk.`,
Default: fs.SizeSuffix(20 * 1024 * 1024),
Advanced: true,
}},
@@ -127,7 +130,7 @@ type Fs struct {
features *fs.Features // optional features
}
// NewFs contstructs an Fs from the path, container:path
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -143,7 +146,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
}
// Strip trailing slashes if they exist in rpath
@@ -158,7 +161,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
wrappedFs, err = wInfo.NewFs(ctx, wName, remotePath, wConfig)
}
if err != nil && err != fs.ErrorIsFile {
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
}
// Create the wrapping fs
@@ -180,6 +183,9 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
SetTier: true,
BucketBased: true,
CanHaveEmptyDirectories: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true
@@ -222,7 +228,7 @@ func processFileName(compressedFileName string) (origFileName string, extension
// Separate the filename and size from the extension
extensionPos := strings.LastIndex(compressedFileName, ".")
if extensionPos == -1 {
return "", "", 0, errors.New("File name has no extension")
return "", "", 0, errors.New("file name has no extension")
}
extension = compressedFileName[extensionPos:]
nameWithSize := compressedFileName[:extensionPos]
@@ -231,11 +237,11 @@ func processFileName(compressedFileName string) (origFileName string, extension
}
match := nameRegexp.FindStringSubmatch(nameWithSize)
if match == nil || len(match) != 3 {
return "", "", 0, errors.New("Invalid filename")
return "", "", 0, errors.New("invalid filename")
}
size, err := base64ToInt64(match[2])
if err != nil {
return "", "", 0, errors.New("Could not decode size")
return "", "", 0, errors.New("could not decode size")
}
return match[1], gzFileExt, size, nil
}
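The 11-character group matched by nameRegexp is an int64 rendered as unpadded URL-safe base64: 8 bytes always encode to exactly 11 characters from the A-Za-z0-9-_ alphabet. A sketch of that packing, assuming little-endian byte order (the real decoding lives in base64ToInt64 above and may differ):

import (
	"encoding/base64"
	"encoding/binary"
)

// sizeToSuffix is an illustration only: it packs a size the way the regexp
// above expects, 8 bytes -> 11 unpadded URL-safe base64 characters.
// The little-endian byte order is an assumption, not taken from rclone.
func sizeToSuffix(size int64) string {
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], uint64(size))
	return base64.RawURLEncoding.EncodeToString(buf[:])
}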
@@ -304,7 +310,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
case fs.Directory:
f.addDir(&newEntries, x)
default:
return nil, errors.Errorf("Unknown object type %T", entry)
return nil, fmt.Errorf("unknown object type %T", entry)
}
}
return newEntries, nil
@@ -361,13 +367,16 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil {
return nil, err
}
meta := readMetadata(ctx, mo)
if meta == nil {
return nil, errors.New("error decoding metadata")
meta, err := readMetadata(ctx, mo)
if err != nil {
return nil, fmt.Errorf("error decoding metadata: %w", err)
}
// Create our Object
o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
return f.newObject(o, mo, meta), err
if err != nil {
return nil, err
}
return f.newObject(o, mo, meta), nil
}
// checkCompressAndType checks if an object is compressible and determines its mime type
@@ -401,6 +410,10 @@ func isCompressible(r io.Reader) (bool, error) {
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}
@@ -410,7 +423,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
srcHash := hasher.Sums()[ht]
dstHash, err := o.Hash(ctx, ht)
if err != nil {
return errors.Wrap(err, "failed to read destination hash")
return fmt.Errorf("failed to read destination hash: %w", err)
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
@@ -418,7 +431,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return errors.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
}
return nil
}
@@ -441,7 +454,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
}
// Need to include what we allready read
// Need to include what we already read
in = &ReadCloserWrapper{
Reader: io.MultiReader(bytes.NewReader(buf), in),
Closer: in,
@@ -454,7 +467,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
}
fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
tempFile, err := ioutil.TempFile("", "rclone-press-")
tempFile, err := os.CreateTemp("", "rclone-press-")
defer func() {
// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
// to ignore them
@@ -462,10 +475,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
_ = os.Remove(tempFile.Name())
}()
if err != nil {
return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
return nil, fmt.Errorf("failed to create temporary local FS to spool file: %w", err)
}
if _, err = io.Copy(tempFile, in); err != nil {
return nil, errors.Wrap(err, "Failed to write temporary local file")
return nil, fmt.Errorf("failed to write temporary local file: %w", err)
}
if _, err = tempFile.Seek(0, 0); err != nil {
return nil, err
@@ -532,8 +545,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
}
// Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
if err != nil {
if o != nil {
removeErr := o.Remove(ctx)
@@ -626,9 +639,11 @@ func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.Objec
// Put the data
mo, err = put(ctx, metaReader, f.wrapInfo(src, makeMetadataName(src.Remote()), int64(len(data))), options...)
if err != nil {
removeErr := mo.Remove(ctx)
if removeErr != nil {
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
if mo != nil {
removeErr := mo.Remove(ctx)
if removeErr != nil {
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
}
}
return nil, err
}
@@ -665,7 +680,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
}
return nil, err
}
return f.newObject(dataObject, mo, meta), err
return f.newObject(dataObject, mo, meta), nil
}
// Put in to the remote path with the modTime given of the given size
@@ -714,23 +729,23 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
err = oldObj.(*Object).Object.Remove(ctx)
if err != nil {
return nil, errors.Wrap(err, "Could remove original object")
return nil, fmt.Errorf("couldn't remove original object: %w", err)
}
}
// If our new object is compressed we have to rename it with the correct size.
// Uncompressed objects don't store the size in the name so we they'll allready have the correct name.
// Uncompressed objects don't store the size in the name so they'll already have the correct name.
if compressible {
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
if err != nil {
return nil, errors.Wrap(err, "Couldn't rename streamed Object.")
return nil, fmt.Errorf("couldn't rename streamed object: %w", err)
}
newObj.Object = wrapObj
}
return newObj, nil
}
// Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// will break stuff. Right now I can't think of a way to make this work.
// PutUnchecked uploads the object
@@ -773,9 +788,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -823,9 +838,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -900,7 +915,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.Fs.Features().CleanUp
if do == nil {
return errors.New("can't CleanUp: not supported by underlying remote")
return errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -909,7 +924,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("can't About: not supported by underlying remote")
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -1028,24 +1043,19 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
}
// This function will read the metadata from a metadata object.
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
// Open our metadata object
rc, err := mo.Open(ctx)
if err != nil {
return nil
return nil, err
}
defer func() {
err := rc.Close()
if err != nil {
fs.Errorf(mo, "Error closing object: %v", err)
}
}()
defer fs.CheckClose(rc, &err)
jr := json.NewDecoder(rc)
meta = new(ObjectMetadata)
if err = jr.Decode(meta); err != nil {
return nil
return nil, err
}
return meta
return meta, nil
}
// Remove removes this object
@@ -1090,6 +1100,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
origName := o.Remote()
if o.meta.Mode != Uncompressed || compressible {
newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
if err != nil {
return err
}
if newObject.Object.Remote() != o.Object.Remote() {
if removeErr := o.Object.Remove(ctx); removeErr != nil {
return removeErr
@@ -1103,9 +1116,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// If we are, just update the object and metadata
newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
}
if err != nil {
return err
if err != nil {
return err
}
}
// Update object metadata and return
o.Object = newObject.Object
@@ -1116,6 +1129,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
if o == nil {
log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
}
return &Object{
Object: o,
f: f,
@@ -1128,6 +1144,9 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object
// This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
if o == nil {
log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
}
return &Object{
Object: o,
f: f,
@@ -1155,7 +1174,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
return err
}
if o.meta == nil {
o.meta = readMetadata(ctx, o.mo)
o.meta, err = readMetadata(ctx, o.mo)
}
return err
}
@@ -1208,6 +1227,21 @@ func (o *Object) MimeType(ctx context.Context) string {
return o.meta.MimeType
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
err := o.loadMetadataIfNotLoaded(ctx)
if err != nil {
return nil, err
}
do, ok := o.mo.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
@@ -1260,7 +1294,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
return o.Object.Open(ctx, options...)
}
// Get offset and limit from OpenOptions, pass the rest to the underlying remote
var openOptions []fs.OpenOption = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
@@ -1354,6 +1388,51 @@ func (o *ObjectInfo) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", nil // cannot know the checksum
}
// ID returns the ID of the Object if known, or "" if not
func (o *ObjectInfo) ID() string {
do, ok := o.src.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// MimeType returns the content type of the Object if
// known, or "" if not
func (o *ObjectInfo) MimeType(ctx context.Context) string {
do, ok := o.src.(fs.MimeTyper)
if !ok {
return ""
}
return do.MimeType(ctx)
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *ObjectInfo) UnWrap() fs.Object {
return fs.UnWrapObjectInfo(o.src)
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.src.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// GetTier returns storage tier or class of the Object
func (o *ObjectInfo) GetTier() string {
do, ok := o.src.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
@@ -1406,11 +1485,6 @@ var (
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.GetTierer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.FullObjectInfo = (*ObjectInfo)(nil)
_ fs.FullObject = (*Object)(nil)
)

View File

@@ -61,5 +61,6 @@ func TestRemoteGzip(t *testing.T) {
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
QuickTestOK: true,
})
}

View File

@@ -7,6 +7,8 @@ import (
gocipher "crypto/cipher"
"crypto/rand"
"encoding/base32"
"encoding/base64"
"errors"
"fmt"
"io"
"strconv"
@@ -15,7 +17,7 @@ import (
"time"
"unicode/utf8"
"github.com/pkg/errors"
"github.com/Max-Sum/base32768"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -94,12 +96,12 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
case "obfuscate":
mode = NameEncryptionObfuscated
default:
err = errors.Errorf("Unknown file name encryption mode %q", s)
err = fmt.Errorf("unknown file name encryption mode %q", s)
}
return mode, err
}
// String turns mode into a human readable string
// String turns mode into a human-readable string
func (mode NameEncryptionMode) String() (out string) {
switch mode {
case NameEncryptionOff:
@@ -114,6 +116,57 @@ func (mode NameEncryptionMode) String() (out string) {
return out
}
// fileNameEncoding are the encoding methods dealing with encrypted file names
type fileNameEncoding interface {
EncodeToString(src []byte) string
DecodeString(s string) ([]byte, error)
}
// caseInsensitiveBase32Encoding defines a file name encoding
// using a modified version of standard base32 as described in
// RFC4648
//
// The standard encoding is modified in two ways
// - it becomes lower case (no-one likes upper case filenames!)
// - we strip the padding character `=`
type caseInsensitiveBase32Encoding struct{}
// EncodeToString encodes a string using the modified version of
// base32 encoding.
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
encoded := base32.HexEncoding.EncodeToString(src)
encoded = strings.TrimRight(encoded, "=")
return strings.ToLower(encoded)
}
// DecodeString decodes a string as encoded by EncodeToString
func (caseInsensitiveBase32Encoding) DecodeString(s string) ([]byte, error) {
if strings.HasSuffix(s, "=") {
return nil, ErrorBadBase32Encoding
}
// First figure out how many padding characters to add
roundUpToMultipleOf8 := (len(s) + 7) &^ 7
equals := roundUpToMultipleOf8 - len(s)
s = strings.ToUpper(s) + "========"[:equals]
return base32.HexEncoding.DecodeString(s)
}
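// Editor's note (worked example, not part of this diff): the expression
// (len(s)+7) &^ 7 clears the low three bits after adding 7, i.e. it rounds the
// length up to the next multiple of 8, which is how long a padded base32
// string must be. For example:
//
//	len(s) = 13  ->  (13+7) &^ 7 = 16  ->  3 '=' characters are re-appended
//	len(s) = 16  ->  (16+7) &^ 7 = 16  ->  no padding needed
//
// so DecodeString can feed a correctly padded, upper-cased string back into
// base32.HexEncoding.DecodeString.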
// NewNameEncoding creates a NameEncoding from a string
func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
s = strings.ToLower(s)
switch s {
case "base32":
enc = caseInsensitiveBase32Encoding{}
case "base64":
enc = base64.RawURLEncoding
case "base32768":
enc = base32768.SafeEncoding
default:
err = fmt.Errorf("unknown file name encoding mode %q", s)
}
return enc, err
}
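// Editor's sketch (hypothetical usage, not part of this diff): picking an
// encoding by name and round-tripping some encrypted-name bytes through it.
// NewNameEncoding and the three encodings are defined above; "ciphertext"
// below is just an illustrative placeholder.
//
//	enc, err := NewNameEncoding("base32768")
//	if err != nil {
//		return err
//	}
//	s := enc.EncodeToString(ciphertext) // filename-safe text
//	back, err := enc.DecodeString(s)    // back to the raw bytes
//	_ = back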
// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
dataKey [32]byte // Key for secretbox
@@ -121,15 +174,17 @@ type Cipher struct {
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
}
// newCipher initialises the cipher. If salt is "" then it uses a built-in salt value
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
c := &Cipher{
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
}
@@ -187,33 +242,9 @@ func (c *Cipher) putBlock(buf []byte) {
c.buffers.Put(buf)
}
// encodeFileName encodes a filename using a modified version of
// standard base32 as described in RFC4648
//
// The standard encoding is modified in two ways
// * it becomes lower case (no-one likes upper case filenames!)
// * we strip the padding character `=`
func encodeFileName(in []byte) string {
encoded := base32.HexEncoding.EncodeToString(in)
encoded = strings.TrimRight(encoded, "=")
return strings.ToLower(encoded)
}
// decodeFileName decodes a filename as encoded by encodeFileName
func decodeFileName(in string) ([]byte, error) {
if strings.HasSuffix(in, "=") {
return nil, ErrorBadBase32Encoding
}
// First figure out how many padding characters to add
roundUpToMultipleOf8 := (len(in) + 7) &^ 7
equals := roundUpToMultipleOf8 - len(in)
in = strings.ToUpper(in) + "========"[:equals]
return base32.HexEncoding.DecodeString(in)
}
// encryptSegment encrypts a path segment
//
// This uses EME with AES
// This uses EME with AES.
//
// EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the
// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
@@ -223,15 +254,15 @@ func decodeFileName(in string) ([]byte, error) {
// same filename must encrypt to the same thing.
//
// This means that
// * filenames with the same name will encrypt the same
// * filenames which start the same won't have a common prefix
// - filenames with the same name will encrypt the same
// - filenames which start the same won't have a common prefix
func (c *Cipher) encryptSegment(plaintext string) string {
if plaintext == "" {
return ""
}
paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext))
ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt)
return encodeFileName(ciphertext)
return c.fileNameEnc.EncodeToString(ciphertext)
}
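// Editor's note (hedged summary, not part of this diff): a name segment goes
// through three steps above — PKCS#7 padding to the 16 byte AES block size,
// the EME wide-block transform, then the configured filename encoding. A rough
// worked example of the padding step alone:
//
//	len("1") = 1 byte    -> padded to 16 bytes -> one EME block
//	len(in) = 16 bytes   -> padded to 32 bytes -> two EME blocks (padding always adds at least one byte)
//
// which is why the encrypted segments in the tests below grow in 16 byte
// (pre-encoding) increments.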
// decryptSegment decrypts a path segment
@@ -239,7 +270,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
rawCiphertext, err := decodeFileName(ciphertext)
rawCiphertext, err := c.fileNameEnc.DecodeString(ciphertext)
if err != nil {
return "", err
}
@@ -580,7 +611,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
func (n *nonce) fromReader(in io.Reader) error {
read, err := io.ReadFull(in, (*n)[:])
if read != fileNonceSize {
return errors.Wrap(err, "short read of nonce")
return fmt.Errorf("short read of nonce: %w", err)
}
return nil
}
@@ -956,7 +987,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Re-open the underlying object with the offset given
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
if err != nil {
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
return 0, fh.finish(fmt.Errorf("couldn't reopen file with offset and limit: %w", err))
}
// Set the file handle
@@ -1054,7 +1085,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// DecryptDataSeek decrypts the data stream from offset
//
// The open function must return a ReadCloser opened to the offset supplied
// The open function must return a ReadCloser opened to the offset supplied.
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {

View File

@@ -4,13 +4,14 @@ import (
"bytes"
"context"
"encoding/base32"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
"github.com/pkg/errors"
"github.com/Max-Sum/base32768"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
@@ -45,11 +46,31 @@ func TestNewNameEncryptionModeString(t *testing.T) {
assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
}
func TestEncodeFileName(t *testing.T) {
for _, test := range []struct {
in string
expected string
}{
type EncodingTestCase struct {
in string
expected string
}
func testEncodeFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
for _, test := range testCases {
enc, err := NewNameEncoding(encoding)
assert.NoError(t, err, "There should be no error creating name encoder for "+encoding+".")
actual := enc.EncodeToString([]byte(test.in))
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
recovered, err := enc.DecodeString(test.expected)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
if caseInsensitive {
in := strings.ToUpper(test.expected)
recovered, err = enc.DecodeString(in)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
}
}
}
func TestEncodeFileNameBase32(t *testing.T) {
testEncodeFileName(t, "base32", []EncodingTestCase{
{"", ""},
{"1", "64"},
{"12", "64p0"},
@@ -67,20 +88,56 @@ func TestEncodeFileName(t *testing.T) {
{"12345678901234", "64p36d1l6orjge9g64p36d0"},
{"123456789012345", "64p36d1l6orjge9g64p36d1l"},
{"1234567890123456", "64p36d1l6orjge9g64p36d1l6o"},
} {
actual := encodeFileName([]byte(test.in))
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
recovered, err := decodeFileName(test.expected)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
in := strings.ToUpper(test.expected)
recovered, err = decodeFileName(in)
assert.NoError(t, err)
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
}
}, true)
}
func TestDecodeFileName(t *testing.T) {
func TestEncodeFileNameBase64(t *testing.T) {
testEncodeFileName(t, "base64", []EncodingTestCase{
{"", ""},
{"1", "MQ"},
{"12", "MTI"},
{"123", "MTIz"},
{"1234", "MTIzNA"},
{"12345", "MTIzNDU"},
{"123456", "MTIzNDU2"},
{"1234567", "MTIzNDU2Nw"},
{"12345678", "MTIzNDU2Nzg"},
{"123456789", "MTIzNDU2Nzg5"},
{"1234567890", "MTIzNDU2Nzg5MA"},
{"12345678901", "MTIzNDU2Nzg5MDE"},
{"123456789012", "MTIzNDU2Nzg5MDEy"},
{"1234567890123", "MTIzNDU2Nzg5MDEyMw"},
{"12345678901234", "MTIzNDU2Nzg5MDEyMzQ"},
{"123456789012345", "MTIzNDU2Nzg5MDEyMzQ1"},
{"1234567890123456", "MTIzNDU2Nzg5MDEyMzQ1Ng"},
}, false)
}
func TestEncodeFileNameBase32768(t *testing.T) {
testEncodeFileName(t, "base32768", []EncodingTestCase{
{"", ""},
{"1", "㼿"},
{"12", "㻙ɟ"},
{"123", "㻙ⲿ"},
{"1234", "㻙ⲍƟ"},
{"12345", "㻙ⲍ⍟"},
{"123456", "㻙ⲍ⍆ʏ"},
{"1234567", "㻙ⲍ⍆觟"},
{"12345678", "㻙ⲍ⍆觓ɧ"},
{"123456789", "㻙ⲍ⍆觓栯"},
{"1234567890", "㻙ⲍ⍆觓栩ɣ"},
{"12345678901", "㻙ⲍ⍆觓栩朧"},
{"123456789012", "㻙ⲍ⍆觓栩朤ʅ"},
{"1234567890123", "㻙ⲍ⍆觓栩朤談"},
{"12345678901234", "㻙ⲍ⍆觓栩朤諆ɔ"},
{"123456789012345", "㻙ⲍ⍆觓栩朤諆媕"},
{"1234567890123456", "㻙ⲍ⍆觓栩朤諆媕䆿"},
}, false)
}
func TestDecodeFileNameBase32(t *testing.T) {
enc, err := NewNameEncoding("base32")
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
// We've tested decoding the valid ones above, now concentrate on the invalid ones
for _, test := range []struct {
in string
@@ -90,17 +147,65 @@ func TestDecodeFileName(t *testing.T) {
{"!", base32.CorruptInputError(0)},
{"hello=hello", base32.CorruptInputError(5)},
} {
actual, actualErr := decodeFileName(test.in)
actual, actualErr := enc.DecodeString(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func TestEncryptSegment(t *testing.T) {
c, _ := newCipher(NameEncryptionStandard, "", "", true)
func TestDecodeFileNameBase64(t *testing.T) {
enc, err := NewNameEncoding("base64")
assert.NoError(t, err, "There should be no error creating name encoder for base64.")
// We've tested decoding the valid ones above, now concentrate on the invalid ones
for _, test := range []struct {
in string
expected string
in string
expectedErr error
}{
{"64=", base64.CorruptInputError(2)},
{"!", base64.CorruptInputError(0)},
{"Hello=Hello", base64.CorruptInputError(5)},
} {
actual, actualErr := enc.DecodeString(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func TestDecodeFileNameBase32768(t *testing.T) {
enc, err := NewNameEncoding("base32768")
assert.NoError(t, err, "There should be no error creating name encoder for base32768.")
// We've tested decoding the valid ones above, now concentrate on the invalid ones
for _, test := range []struct {
in string
expectedErr error
}{
{"㼿c", base32768.CorruptInputError(1)},
{"!", base32768.CorruptInputError(0)},
{"㻙ⲿ=㻙ⲿ", base32768.CorruptInputError(2)},
} {
actual, actualErr := enc.DecodeString(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func testEncryptSegment(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range testCases {
actual := c.encryptSegment(test.in)
assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
recovered, err := c.decryptSegment(test.expected)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
if caseInsensitive {
in := strings.ToUpper(test.expected)
recovered, err = c.decryptSegment(in)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
}
}
}
func TestEncryptSegmentBase32(t *testing.T) {
testEncryptSegment(t, "base32", []EncodingTestCase{
{"", ""},
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"12", "l42g6771hnv3an9cgc8cr2n1ng"},
@@ -118,26 +223,61 @@ func TestEncryptSegment(t *testing.T) {
{"12345678901234", "moq0uqdlqrblrc5pa5u5c7hq9g"},
{"123456789012345", "eeam3li4rnommi3a762h5n7meg"},
{"1234567890123456", "mijbj0frqf6ms7frcr6bd9h0env53jv96pjaaoirk7forcgpt70g"},
} {
actual := c.encryptSegment(test.in)
assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
recovered, err := c.decryptSegment(test.expected)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
in := strings.ToUpper(test.expected)
recovered, err = c.decryptSegment(in)
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
}
}, true)
}
func TestDecryptSegment(t *testing.T) {
func TestEncryptSegmentBase64(t *testing.T) {
testEncryptSegment(t, "base64", []EncodingTestCase{
{"", ""},
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"12", "qQUDHOGN_jVdLIMQzYrhvA"},
{"123", "1CxFf2Mti1xIPYlGruDh-A"},
{"1234", "RL-xOTmsxsG7kuTy2XJUxw"},
{"12345", "3FP_GHoeBJdq0yLgaED8IQ"},
{"123456", "Xc4T1Gqrs3OVYnrE6dpEWQ"},
{"1234567", "uZeEzssOnDWHEOzLqjwpog"},
{"12345678", "8noiTP5WkkbEuijsPhOpxQ"},
{"123456789", "GeNxgLA0wiaGAKU3U7qL4Q"},
{"1234567890", "x1DUhdmqoVWYVBLD3dha-A"},
{"12345678901", "iEyP_3BZR6vvv_2WM6NbZw"},
{"123456789012", "4OPGvS4SZdjvS568APUaFw"},
{"1234567890123", "Y8c5Wr8OhYYUo7fPwdojdg"},
{"12345678901234", "tjQPabXW112wuVF8Vh46TA"},
{"123456789012345", "c5Vh1kTd8WtIajmFEtz2dA"},
{"1234567890123456", "tKa5gfvTzW4d-2bMtqYgdf5Rz-k2ZqViW6HfjbIZ6cE"},
}, false)
}
func TestEncryptSegmentBase32768(t *testing.T) {
testEncryptSegment(t, "base32768", []EncodingTestCase{
{"", ""},
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"12", "竢朧䉱虃光塬䟛⣡蓟"},
{"123", "遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
{"1234", "䢟銮䵵狌㐜燳谒颴詟"},
{"12345", "钉Ꞇ㖃蚩憶狫朰杜㜿"},
{"123456", "啇ᚵⵕ憗䋫➫➓肤卟"},
{"1234567", "茫螓翁連劘樓㶔抉矟"},
{"12345678", "龝☳䘊辄岅較络㧩襟"},
{"123456789", "ⲱ苀㱆犂媐Ꮤ锇惫靟"},
{"1234567890", "計宁憕偵匢皫╛纺ꌟ"},
{"12345678901", "檆䨿鑫㪺藝ꡖ勇䦛婟"},
{"123456789012", "雑頏䰂䲝淚哚鹡魺⪟"},
{"1234567890123", "塃璶繁躸圅㔟䗃肃懟"},
{"12345678901234", "腺ᕚ崚鏕鏥讥鼌䑺䲿"},
{"123456789012345", "怪绕滻蕶肣但⠥荖惟"},
{"1234567890123456", "肳哀旚挶靏鏻㾭䱠慟㪳ꏆ賊兲铧敻塹魀ʟ"},
}, false)
}
func TestDecryptSegmentBase32(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 3328)
for i := range longName {
longName[i] = 'a'
}
c, _ := newCipher(NameEncryptionStandard, "", "", true)
enc, _ := NewNameEncoding("base32")
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range []struct {
in string
expectedErr error
@@ -145,118 +285,371 @@ func TestDecryptSegment(t *testing.T) {
{"64=", ErrorBadBase32Encoding},
{"!", base32.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
} {
actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func TestEncryptFileName(t *testing.T) {
func TestDecryptSegmentBase64(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 2816)
for i := range longName {
longName[i] = 'a'
}
enc, _ := NewNameEncoding("base64")
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range []struct {
in string
expectedErr error
}{
{"6H=", base64.CorruptInputError(2)},
{"!", base64.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
} {
actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func TestDecryptSegmentBase32768(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := strings.Repeat("怪", 1280)
enc, _ := NewNameEncoding("base32768")
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range []struct {
in string
expectedErr error
}{
{"怪=", base32768.CorruptInputError(1)},
{"!", base32768.CorruptInputError(0)},
{longName, ErrorTooLongAfterDecode},
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
} {
actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
}
}
func testStandardEncryptFileName(t *testing.T, encoding string, testCasesEncryptDir []EncodingTestCase, testCasesNoEncryptDir []EncodingTestCase) {
// First standard mode
c, _ := newCipher(NameEncryptionStandard, "", "", true)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
for _, test := range testCasesEncryptDir {
assert.Equal(t, test.expected, c.EncryptFileName(test.in))
}
// Standard mode with directory name encryption off
c, _ = newCipher(NameEncryptionStandard, "", "", false)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
// Now off mode
c, _ = newCipher(NameEncryptionOff, "", "", true)
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
for _, test := range testCasesNoEncryptDir {
assert.Equal(t, test.expected, c.EncryptFileName(test.in))
}
}
func TestStandardEncryptFileNameBase32(t *testing.T) {
testStandardEncryptFileName(t, "base32", []EncodingTestCase{
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
}, []EncodingTestCase{
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"1/12", "1/l42g6771hnv3an9cgc8cr2n1ng"},
{"1/12/123", "1/12/qgm4avr35m5loi1th53ato71v0"},
{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
})
}
func TestStandardEncryptFileNameBase64(t *testing.T) {
testStandardEncryptFileName(t, "base64", []EncodingTestCase{
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
}, []EncodingTestCase{
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"1/12", "1/qQUDHOGN_jVdLIMQzYrhvA"},
{"1/12/123", "1/12/1CxFf2Mti1xIPYlGruDh-A"},
{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "1/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
})
}
func TestStandardEncryptFileNameBase32768(t *testing.T) {
testStandardEncryptFileName(t, "base32768", []EncodingTestCase{
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
}, []EncodingTestCase{
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"1/12", "1/竢朧䉱虃光塬䟛⣡蓟"},
{"1/12/123", "1/12/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
{"1/12-v2001-02-03-040506-123", "1/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
})
}
func TestNonStandardEncryptFileName(t *testing.T) {
// Off mode
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
// Obfuscation mode
c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
// Obfuscation mode with directory name encryption off
c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
c, _ = newCipher(NameEncryptionObfuscated, "", "", false, nil)
assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
}
func TestDecryptFileName(t *testing.T) {
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
in string
expected string
expectedErr error
}{
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
func testStandardDecryptFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
enc, _ := NewNameEncoding(encoding)
for _, test := range testCases {
// Test when dirNameEncrypt=true
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
assert.Equal(t, test.expectedErr, actualErr, what)
assert.NoError(t, actualErr)
assert.Equal(t, test.expected, actual)
if caseInsensitive {
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
actual, actualErr := c.DecryptFileName(strings.ToUpper(test.in))
assert.NoError(t, actualErr)
assert.Equal(t, test.expected, actual)
}
// Adding a character should raise ErrorNotAMultipleOfBlocksize
actual, actualErr = c.DecryptFileName(enc.EncodeToString([]byte("1")) + test.in)
assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
assert.Equal(t, "", actual)
// Test when dirNameEncrypt=false
noDirEncryptIn := test.in
if strings.LastIndex(test.expected, "/") != -1 {
noDirEncryptIn = test.expected[:strings.LastIndex(test.expected, "/")] + test.in[strings.LastIndex(test.in, "/"):]
}
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
actual, actualErr = c.DecryptFileName(noDirEncryptIn)
assert.NoError(t, actualErr)
assert.Equal(t, test.expected, actual)
}
}
func TestStandardDecryptFileNameBase32(t *testing.T) {
testStandardDecryptFileName(t, "base32", []EncodingTestCase{
{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
}, true)
}
func TestStandardDecryptFileNameBase64(t *testing.T) {
testStandardDecryptFileName(t, "base64", []EncodingTestCase{
{"yBxRX25ypgUVyj8MSxJnFw", "1"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
}, false)
}
func TestStandardDecryptFileNameBase32768(t *testing.T) {
testStandardDecryptFileName(t, "base32768", []EncodingTestCase{
{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
}, false)
}
func TestNonStandardDecryptFileName(t *testing.T) {
for _, encoding := range []string{"base32", "base64", "base32768"} {
enc, _ := NewNameEncoding(encoding)
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
in string
expected string
expectedErr error
}{
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
assert.Equal(t, test.expectedErr, actualErr, what)
}
}
}
func TestEncDecMatches(t *testing.T) {
for _, test := range []struct {
mode NameEncryptionMode
in string
}{
{NameEncryptionStandard, "1/2/3/4"},
{NameEncryptionOff, "1/2/3/4"},
{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
{NameEncryptionObfuscated, "Avatar The Last Airbender"},
} {
c, _ := newCipher(test.mode, "", "", true)
out, err := c.DecryptFileName(c.EncryptFileName(test.in))
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, out, test.in, what)
assert.Equal(t, err, nil, what)
for _, encoding := range []string{"base32", "base64", "base32768"} {
enc, _ := NewNameEncoding(encoding)
for _, test := range []struct {
mode NameEncryptionMode
in string
}{
{NameEncryptionStandard, "1/2/3/4"},
{NameEncryptionOff, "1/2/3/4"},
{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
{NameEncryptionObfuscated, "Avatar The Last Airbender"},
} {
c, _ := newCipher(test.mode, "", "", true, enc)
out, err := c.DecryptFileName(c.EncryptFileName(test.in))
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, out, test.in, what)
assert.Equal(t, err, nil, what)
}
}
}
func TestEncryptDirName(t *testing.T) {
func testStandardEncryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase) {
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
// First standard mode
c, _ := newCipher(NameEncryptionStandard, "", "", true)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptDirName("1"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptDirName("1/12"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptDirName("1/12/123"))
// Standard mode with dir name encryption off
c, _ = newCipher(NameEncryptionStandard, "", "", false)
assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
// Now off mode
c, _ = newCipher(NameEncryptionOff, "", "", true)
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
for _, test := range testCases {
assert.Equal(t, test.expected, c.EncryptDirName(test.in))
}
}
func TestDecryptDirName(t *testing.T) {
func TestStandardEncryptDirNameBase32(t *testing.T) {
testStandardEncryptDirName(t, "base32", []EncodingTestCase{
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
})
}
func TestStandardEncryptDirNameBase64(t *testing.T) {
testStandardEncryptDirName(t, "base64", []EncodingTestCase{
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
})
}
func TestStandardEncryptDirNameBase32768(t *testing.T) {
testStandardEncryptDirName(t, "base32768", []EncodingTestCase{
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
})
}
func TestNonStandardEncryptDirName(t *testing.T) {
for _, encoding := range []string{"base32", "base64", "base32768"} {
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", false, enc)
assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
// Now off mode
c, _ = newCipher(NameEncryptionOff, "", "", true, enc)
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
}
}
func testStandardDecryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
enc, _ := NewNameEncoding(encoding)
for _, test := range testCases {
// Test dirNameEncrypt=true
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
actual, actualErr := c.DecryptDirName(test.in)
assert.Equal(t, test.expected, actual)
assert.NoError(t, actualErr)
if caseInsensitive {
actual, actualErr := c.DecryptDirName(strings.ToUpper(test.in))
assert.Equal(t, actual, test.expected)
assert.NoError(t, actualErr)
}
actual, actualErr = c.DecryptDirName(enc.EncodeToString([]byte("1")) + test.in)
assert.Equal(t, "", actual)
assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
// Test dirNameEncrypt=false
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
actual, actualErr = c.DecryptDirName(test.in)
assert.Equal(t, test.in, actual)
assert.NoError(t, actualErr)
actual, actualErr = c.DecryptDirName(test.expected)
assert.Equal(t, test.expected, actual)
assert.NoError(t, actualErr)
}
}
/*
enc, _ := NewNameEncoding(encoding)
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
in string
expected string
expectedErr error
}{
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
actual, actualErr := c.DecryptDirName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
assert.Equal(t, test.expectedErr, actualErr, what)
}
*/
func TestStandardDecryptDirNameBase32(t *testing.T) {
testStandardDecryptDirName(t, "base32", []EncodingTestCase{
{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
}, true)
}
func TestStandardDecryptDirNameBase64(t *testing.T) {
testStandardDecryptDirName(t, "base64", []EncodingTestCase{
{"yBxRX25ypgUVyj8MSxJnFw", "1"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
}, false)
}
func TestStandardDecryptDirNameBase32768(t *testing.T) {
testStandardDecryptDirName(t, "base32768", []EncodingTestCase{
{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
}, false)
}
func TestNonStandardDecryptDirName(t *testing.T) {
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
@@ -264,18 +657,11 @@ func TestDecryptDirName(t *testing.T) {
expected string
expectedErr error
}{
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123.bin", nil},
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil},
{NameEncryptionOff, true, ".bin", ".bin", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, nil)
actual, actualErr := c.DecryptDirName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
@@ -284,7 +670,7 @@ func TestDecryptDirName(t *testing.T) {
}
func TestEncryptedSize(t *testing.T) {
c, _ := newCipher(NameEncryptionStandard, "", "", true)
c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
for _, test := range []struct {
in int64
expected int64
@@ -308,7 +694,7 @@ func TestEncryptedSize(t *testing.T) {
func TestDecryptedSize(t *testing.T) {
// Test the errors since we tested the reverse above
c, _ := newCipher(NameEncryptionStandard, "", "", true)
c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
for _, test := range []struct {
in int64
expectedErr error
@@ -637,7 +1023,7 @@ func (r *randomSource) Read(p []byte) (n int, err error) {
func (r *randomSource) Write(p []byte) (n int, err error) {
for i := range p {
if p[i] != r.next() {
return 0, errors.Errorf("Error in stream at %d", r.counter)
return 0, fmt.Errorf("error in stream at %d", r.counter)
}
}
return len(p), nil
@@ -679,14 +1065,14 @@ func (z *zeroes) Read(p []byte) (n int, err error) {
// Test encrypt decrypt with different buffer sizes
func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = &zeroes{} // zero out the nonce
buf := make([]byte, bufSize)
source := newRandomSource(copySize)
encrypted, err := c.newEncrypter(source, nil)
assert.NoError(t, err)
decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
decrypted, err := c.newDecrypter(io.NopCloser(encrypted))
assert.NoError(t, err)
sink := newRandomSource(copySize)
n, err := io.CopyBuffer(sink, decrypted, buf)
@@ -749,7 +1135,7 @@ func TestEncryptData(t *testing.T) {
{[]byte{1}, file1},
{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, file16},
} {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
@@ -757,22 +1143,22 @@ func TestEncryptData(t *testing.T) {
buf := bytes.NewBuffer(test.in)
encrypted, err := c.EncryptData(buf)
assert.NoError(t, err)
out, err := ioutil.ReadAll(encrypted)
out, err := io.ReadAll(encrypted)
assert.NoError(t, err)
assert.Equal(t, test.expected, out)
// Check we can decode the data properly too...
buf = bytes.NewBuffer(out)
decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
decrypted, err := c.DecryptData(io.NopCloser(buf))
assert.NoError(t, err)
out, err = ioutil.ReadAll(decrypted)
out, err = io.ReadAll(decrypted)
assert.NoError(t, err)
assert.Equal(t, test.in, out)
}
}
func TestNewEncrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
@@ -788,20 +1174,19 @@ func TestNewEncrypter(t *testing.T) {
fh, err = c.newEncrypter(z, nil)
assert.Nil(t, fh)
assert.Error(t, err, "short read of nonce")
}
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
// cause a fatal loop
func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
n, err := io.CopyN(io.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n)
}
@@ -823,7 +1208,7 @@ func (c *closeDetector) Close() error {
}
func TestNewDecrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
@@ -866,36 +1251,36 @@ func TestNewDecrypter(t *testing.T) {
// Test the stream returning 0, io.ErrUnexpectedEOF
func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16)
in := ioutil.NopCloser(io.MultiReader(in1, in2))
in := io.NopCloser(io.MultiReader(in1, in2))
fh, err := c.newDecrypter(in)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
n, err := io.CopyN(io.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n)
}
func TestNewDecrypterSeekLimit(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
c.cryptoRand = &zeroes{} // nodge the crypto rand generator
// Make random data
const dataSize = 150000
plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
plaintext, err := io.ReadAll(newRandomSource(dataSize))
assert.NoError(t, err)
// Encrypt the data
buf := bytes.NewBuffer(plaintext)
encrypted, err := c.EncryptData(buf)
assert.NoError(t, err)
ciphertext, err := ioutil.ReadAll(encrypted)
ciphertext, err := io.ReadAll(encrypted)
assert.NoError(t, err)
trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
@@ -914,7 +1299,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
end = len(ciphertext)
}
}
reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil
}
@@ -1088,7 +1473,7 @@ func TestDecrypterCalculateUnderlying(t *testing.T) {
}
func TestDecrypterRead(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
// Test truncating the file at each possible point
@@ -1104,7 +1489,7 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err, what)
continue
}
_, err = ioutil.ReadAll(fh)
_, err = io.ReadAll(fh)
var expectedErr error
switch {
case i == fileHeaderSize:
@@ -1128,7 +1513,7 @@ func TestDecrypterRead(t *testing.T) {
cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd)
assert.NoError(t, err)
_, err = ioutil.ReadAll(fh)
_, err = io.ReadAll(fh)
assert.Error(t, err, "potato")
assert.Equal(t, 0, cd.closed)
@@ -1138,13 +1523,13 @@ func TestDecrypterRead(t *testing.T) {
copy(file16copy, file16)
for i := range file16copy {
file16copy[i] ^= 0xFF
fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
if i < fileMagicSize {
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.Nil(t, fh)
} else {
assert.NoError(t, err)
_, err = ioutil.ReadAll(fh)
_, err = io.ReadAll(fh)
assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
}
file16copy[i] ^= 0xFF
@@ -1152,7 +1537,7 @@ func TestDecrypterRead(t *testing.T) {
}
func TestDecrypterClose(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
cd := newCloseDetector(bytes.NewBuffer(file16))
@@ -1179,7 +1564,7 @@ func TestDecrypterClose(t *testing.T) {
assert.Equal(t, 0, cd.closed)
// close after reading
out, err := ioutil.ReadAll(fh)
out, err := io.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, []byte{1}, out)
assert.Equal(t, io.EOF, fh.err)
@@ -1190,7 +1575,7 @@ func TestDecrypterClose(t *testing.T) {
}
func TestPutGetBlock(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
block := c.getBlock()
@@ -1201,7 +1586,7 @@ func TestPutGetBlock(t *testing.T) {
}
func TestKey(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
assert.NoError(t, err)
// Check zero keys OK

View File

@@ -3,13 +3,13 @@ package crypt
import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -28,9 +28,12 @@ func init() {
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
CommandHelp: commandHelp,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "filename_encryption",
@@ -39,13 +42,13 @@ func init() {
Examples: []fs.OptionExample{
{
Value: "standard",
Help: "Encrypt the filenames see the docs for the details.",
Help: "Encrypt the filenames.\nSee the docs for the details.",
}, {
Value: "obfuscate",
Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
},
},
}, {
@@ -71,7 +74,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
Required: true,
}, {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
Help: "Password or pass phrase for salt.\n\nOptional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "server_side_across_configs",
@@ -116,6 +119,29 @@ names, or for debugging purposes.`,
Help: "Encrypt file data.",
},
},
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to a text string.
This option can help shorten the encrypted filename. The most
suitable choice depends on how your remote counts filename
length and whether it is case-sensitive.`,
Default: "base32",
Examples: []fs.OptionExample{
{
Value: "base32",
Help: "Encode using base32. Suitable for all remote.",
},
{
Value: "base64",
Help: "Encode using base64. Suitable for case sensitive remote.",
},
{
Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
},
},
Advanced: true,
}},
})
}
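// Editor's sketch: a hypothetical rclone.conf stanza using the new
// filename_encoding option added above (the remote name, wrapped remote and
// password are placeholders):
//
//	[secret]
//	type = crypt
//	remote = onedrive:encrypted
//	filename_encryption = standard
//	filename_encoding = base32768
//	password = *** ENCRYPTED ***
//
// base32768 packs more bits per character than base32 or base64, so it helps
// on remotes that limit names by UTF-16 units or Unicode codepoints rather
// than by UTF-8 bytes.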
@@ -131,18 +157,22 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
}
password, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password")
return nil, fmt.Errorf("failed to decrypt password: %w", err)
}
var salt string
if opt.Password2 != "" {
salt, err = obscure.Reveal(opt.Password2)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password2")
return nil, fmt.Errorf("failed to decrypt password2: %w", err)
}
}
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
enc, err := NewNameEncoding(opt.FilenameEncoding)
if err != nil {
return nil, errors.Wrap(err, "failed to make cipher")
return nil, err
}
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption, enc)
if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err)
}
return cipher, nil
}
@@ -192,7 +222,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
}
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
}
f := &Fs{
Fs: wrappedFs,
@@ -205,7 +235,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false,
@@ -214,6 +244,9 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
SetTier: true,
GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
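// Editor's note (descriptive, best-effort reading of the line above): Fill
// populates the feature set this wrapper could offer, Mask then switches off
// anything the wrapped remote does not itself support, and WrapsFs records the
// wrapping relationship. With this change, metadata support is advertised but
// remains subject to the wrapped remote's own capabilities via Mask.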
return f, err
@@ -229,6 +262,7 @@ type Options struct {
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
FilenameEncoding string `config:"filename_encoding"`
}
// Fs represents a wrapped fs.Fs
@@ -300,7 +334,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
case fs.Directory:
f.addDir(ctx, &newEntries, x)
default:
return nil, errors.Errorf("Unknown object type %T", entry)
return nil, fmt.Errorf("unknown object type %T", entry)
}
}
return newEntries, nil
@@ -362,8 +396,14 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
ci := fs.GetConfig(ctx)
if f.opt.NoDataEncryption {
return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
if err == nil && o != nil {
o = f.newObject(o)
}
return o, err
}
// Encrypt the data into wrappedIn
@@ -375,6 +415,9 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// Find a hash the destination supports to compute a hash of
// the encrypted data
ht := f.Fs.Hashes().GetOne()
if ci.IgnoreChecksum {
ht = hash.None
}
var hasher *hash.MultiHasher
if ht != hash.None {
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -402,7 +445,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
var dstHash string
dstHash, err = o.Hash(ctx, ht)
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
return nil, fmt.Errorf("failed to read destination hash: %w", err)
}
if srcHash != "" && dstHash != "" {
if srcHash != dstHash {
@@ -411,7 +454,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
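// Editor's sketch (hedged and simplified; the real put() above also handles
// encryption, options and no-encryption mode): the upload verification hashes
// the *encrypted* stream on the way up and compares it with what the remote
// reports, removing the object on mismatch.
//
//	ht := f.Fs.Hashes().GetOne()
//	hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht))
//	if err != nil {
//		return nil, err
//	}
//	wrappedIn = io.TeeReader(wrappedIn, hasher) // hash while uploading
//	o, err := put(ctx, wrappedIn, f.newObjectInfo(src, encrypterNonce), options...)
//	// ... then compare hasher.Sums()[ht] with o.Hash(ctx, ht) as above
//	// (encrypterNonce here is a placeholder for the nonce taken from the encrypter).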
@@ -469,9 +512,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -494,9 +537,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -565,7 +608,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.Fs.Features().CleanUp
if do == nil {
return errors.New("can't CleanUp")
return errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -574,7 +617,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("About not supported")
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -612,24 +655,24 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
// Open the src for input
in, err := src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
return "", fmt.Errorf("failed to open src: %w", err)
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
return "", fmt.Errorf("failed to make encrypter: %w", err)
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
return "", fmt.Errorf("failed to make hasher: %w", err)
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
return "", fmt.Errorf("failed to hash data: %w", err)
}
return m.Sums()[hashType], nil
@@ -648,12 +691,12 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
// use a limited read so we only read the header
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce")
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
}
d, err := f.cipher.newDecrypter(in)
if err != nil {
_ = in.Close()
return "", errors.Wrap(err, "failed to open object to read nonce")
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
}
nonce := d.nonce
// fs.Debugf(o, "Read nonce % 2x", nonce)
@@ -672,7 +715,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
// Close d (and hence in) once we have read the nonce
err = d.Close()
if err != nil {
return "", errors.Wrap(err, "failed to close nonce read")
return "", fmt.Errorf("failed to close nonce read: %w", err)
}
return f.computeHashWithNonce(ctx, nonce, src, hashType)
@@ -791,7 +834,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
for _, encryptedFileName := range arg {
fileName, err := f.DecryptFileName(encryptedFileName)
if err != nil {
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
return out, fmt.Errorf("failed to decrypt: %s: %w", encryptedFileName, err)
}
out = append(out, fileName)
}
@@ -995,6 +1038,9 @@ func (o *ObjectInfo) Size() int64 {
if size < 0 {
return size
}
if o.f.opt.NoDataEncryption {
return size
}
return o.f.cipher.EncryptedSize(size)
}
@@ -1006,10 +1052,11 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
// Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Otherwise likely is an operations.OverrideRemote
} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
// Unwrap if it is an operations.OverrideRemote
srcObj = do.UnWrap()
} else {
// Otherwise don't unwrap any further
return "", nil
}
// if this is wrapping a local object then we work out the hash
@@ -1021,6 +1068,50 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
return "", nil
}
// GetTier returns storage tier or class of the Object
func (o *ObjectInfo) GetTier() string {
do, ok := o.ObjectInfo.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// ID returns the ID of the Object if known, or "" if not
func (o *ObjectInfo) ID() string {
do, ok := o.ObjectInfo.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.ObjectInfo.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// MimeType returns the content type of the Object if
// known, or "" if not
//
// This is deliberately unsupported so we don't leak mime type info by
// default.
func (o *ObjectInfo) MimeType(ctx context.Context) string {
return ""
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *ObjectInfo) UnWrap() fs.Object {
return fs.UnWrapObjectInfo(o.ObjectInfo)
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
@@ -1049,6 +1140,26 @@ func (o *Object) GetTier() string {
return do.GetTier()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.Object.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// MimeType returns the content type of the Object if
// known, or "" if not
//
// This is deliberately unsupported so we don't leak mime type info by
// default.
func (o *Object) MimeType(ctx context.Context) string {
return ""
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1071,10 +1182,6 @@ var (
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
_ fs.FullObjectInfo = (*ObjectInfo)(nil)
_ fs.FullObject = (*Object)(nil)
)
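
A note on the ObjectInfo.Size change above: with no_data_encryption set the plaintext size is returned unchanged, otherwise cipher.EncryptedSize is used. As a rough illustration of what that encrypted size works out to, here is a standalone sketch assuming the file format described in the crypt docs (32 byte header of magic plus nonce, and a 16 byte authenticator per 64 KiB block). It is illustrative only, not the backend's cipher code.

package main

import "fmt"

const (
	headerSize = 32        // assumed: 8 byte magic + 24 byte nonce
	blockSize  = 64 * 1024 // assumed: plaintext bytes per encrypted block
	macSize    = 16        // assumed: authenticator appended to each block
)

// encryptedSize mirrors the idea behind cipher.EncryptedSize: header plus
// payload plus one authenticator per (possibly partial) block.
func encryptedSize(size int64) int64 {
	if size < 0 {
		return size // unknown size stays unknown
	}
	blocks := size / blockSize
	if size%blockSize != 0 {
		blocks++
	}
	return headerSize + size + blocks*macSize
}

func main() {
	fmt.Println(encryptedSize(0))     // 32
	fmt.Println(encryptedSize(1))     // 49
	fmt.Println(encryptedSize(65536)) // 65584
	fmt.Println(encryptedSize(65537)) // 65601
}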


@@ -17,41 +17,28 @@ import (
"github.com/stretchr/testify/require"
)
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
localFs, err := fs.TemporaryLocalFs(context.Background())
require.NoError(t, err)
cleanup = func() {
t.Cleanup(func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
}
return localFs, cleanup
})
return localFs
}
// Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err)
cleanup = func() {
t.Cleanup(func() {
require.NoError(t, obj.Remove(context.Background()))
}
return obj, cleanup
})
return obj
}
// Test the ObjectInfo
@@ -65,11 +52,9 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
path = "_wrap"
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
localFs := makeTempLocalFs(t)
obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
obj := uploadFile(t, localFs, path, contents)
// encrypt the data
inBuf := bytes.NewBufferString(contents)
@@ -83,7 +68,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var oi fs.ObjectInfo = obj
if wrap {
// wrap the object in an fs.ObjectUnwrapper if required
oi = testWrapper{oi}
oi = fs.NewOverrideRemote(oi, "new_remote")
}
// wrap the object in a crypt for upload using the nonce we
@@ -91,7 +76,9 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
src := f.newObjectInfo(oi, nonce)
// Test ObjectInfo methods
assert.Equal(t, int64(outBuf.Len()), src.Size())
if !f.opt.NoDataEncryption {
assert.Equal(t, int64(outBuf.Len()), src.Size())
}
assert.Equal(t, f, src.Fs())
assert.NotEqual(t, path, src.Remote())
@@ -114,16 +101,13 @@ func testComputeHash(t *testing.T, f *Fs) {
t.Skipf("%v: does not support hashes", f.Fs)
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
localFs := makeTempLocalFs(t)
// Upload a file to localFs as a test object
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
localObj := uploadFile(t, localFs, path, contents)
// Upload the same data to the remote Fs also
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
remoteObj := uploadFile(t, f, path, contents)
// Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)


@@ -4,6 +4,7 @@ package crypt_test
import (
"os"
"path/filepath"
"runtime"
"testing"
"github.com/rclone/rclone/backend/crypt"
@@ -29,7 +30,7 @@ func TestIntegration(t *testing.T) {
}
// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
func TestStandardBase32(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
@@ -46,6 +47,51 @@ func TestStandard(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
func TestStandardBase64(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
func TestStandardBase32768(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -67,6 +113,7 @@ func TestOff(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -75,6 +122,9 @@ func TestObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
if runtime.GOOS == "darwin" {
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt3"
fstests.Run(t, &fstests.Opt{
@@ -89,6 +139,7 @@ func TestObfuscate(t *testing.T) {
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -97,6 +148,9 @@ func TestNoDataObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
if runtime.GOOS == "darwin" {
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt4"
fstests.Run(t, &fstests.Opt{
@@ -112,5 +166,6 @@ func TestNoDataObfuscate(t *testing.T) {
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}


@@ -4,15 +4,15 @@
// buffers which are a multiple of an underlying crypto block size.
package pkcs7
import "github.com/pkg/errors"
import "errors"
// Errors Unpad can return
var (
ErrorPaddingNotFound = errors.New("Bad PKCS#7 padding - not padded")
ErrorPaddingNotAMultiple = errors.New("Bad PKCS#7 padding - not a multiple of blocksize")
ErrorPaddingTooLong = errors.New("Bad PKCS#7 padding - too long")
ErrorPaddingTooShort = errors.New("Bad PKCS#7 padding - too short")
ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same")
ErrorPaddingNotFound = errors.New("bad PKCS#7 padding - not padded")
ErrorPaddingNotAMultiple = errors.New("bad PKCS#7 padding - not a multiple of blocksize")
ErrorPaddingTooLong = errors.New("bad PKCS#7 padding - too long")
ErrorPaddingTooShort = errors.New("bad PKCS#7 padding - too short")
ErrorPaddingNotAllTheSame = errors.New("bad PKCS#7 padding - not all the same")
)
// Pad buf using PKCS#7 to a multiple of n.
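
The pkcs7 change above only swaps github.com/pkg/errors for the standard library errors package and lower-cases the messages. For readers unfamiliar with the scheme itself, here is a minimal standalone sketch of PKCS#7 padding and unpadding that produces the same classes of error; it is illustrative only, not the rclone implementation.

package main

import (
	"bytes"
	"errors"
	"fmt"
)

// pad appends 1..n bytes, each holding the pad length, so the result is a
// multiple of n. A buffer that is already a multiple of n gets a full block.
func pad(n int, buf []byte) []byte {
	padLen := n - len(buf)%n
	return append(buf, bytes.Repeat([]byte{byte(padLen)}, padLen)...)
}

// unpad checks the padding and strips it, returning errors analogous to the
// ones defined above.
func unpad(n int, buf []byte) ([]byte, error) {
	if len(buf) == 0 || len(buf)%n != 0 {
		return nil, errors.New("bad PKCS#7 padding - not a multiple of blocksize")
	}
	padLen := int(buf[len(buf)-1])
	switch {
	case padLen == 0:
		return nil, errors.New("bad PKCS#7 padding - not padded")
	case padLen > n:
		return nil, errors.New("bad PKCS#7 padding - too long")
	}
	for _, b := range buf[len(buf)-padLen:] {
		if int(b) != padLen {
			return nil, errors.New("bad PKCS#7 padding - not all the same")
		}
	}
	return buf[:len(buf)-padLen], nil
}

func main() {
	padded := pad(16, []byte("hello"))
	fmt.Println(len(padded)) // 16
	orig, err := unpad(16, padded)
	fmt.Println(string(orig), err) // hello <nil>
}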

backend/drive/drive.go (595 changed lines), Executable file → Normal file

File diff suppressed because it is too large.


@@ -4,8 +4,9 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"os"
"path"
@@ -14,17 +15,20 @@ import (
"testing"
"time"
"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
func TestDriveScopes(t *testing.T) {
@@ -73,7 +77,7 @@ var additionalMimeTypes = map[string]string{
// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {})
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
buf, err := os.ReadFile(filepath.FromSlash("test/about.json"))
var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"`
@@ -187,6 +191,69 @@ func TestExtensionsForImportFormats(t *testing.T) {
}
}
func (f *Fs) InternalTestShouldRetry(t *testing.T) {
ctx := context.Background()
gatewayTimeout := googleapi.Error{
Code: 503,
}
timeoutRetry, timeoutError := f.shouldRetry(ctx, &gatewayTimeout)
assert.True(t, timeoutRetry)
assert.Equal(t, &gatewayTimeout, timeoutError)
generic403 := googleapi.Error{
Code: 403,
}
rLEItem := googleapi.ErrorItem{
Reason: "rateLimitExceeded",
Message: "User rate limit exceeded.",
}
generic403.Errors = append(generic403.Errors, rLEItem)
oldStopUpload := f.opt.StopOnUploadLimit
oldStopDownload := f.opt.StopOnDownloadLimit
f.opt.StopOnUploadLimit = true
f.opt.StopOnDownloadLimit = true
defer func() {
f.opt.StopOnUploadLimit = oldStopUpload
f.opt.StopOnDownloadLimit = oldStopDownload
}()
expectedRLError := fserrors.FatalError(&generic403)
rateLimitRetry, rateLimitErr := f.shouldRetry(ctx, &generic403)
assert.False(t, rateLimitRetry)
assert.Equal(t, rateLimitErr, expectedRLError)
dQEItem := googleapi.ErrorItem{
Reason: "downloadQuotaExceeded",
}
generic403.Errors[0] = dQEItem
expectedDQError := fserrors.FatalError(&generic403)
downloadQuotaRetry, downloadQuotaError := f.shouldRetry(ctx, &generic403)
assert.False(t, downloadQuotaRetry)
assert.Equal(t, downloadQuotaError, expectedDQError)
tDFLEItem := googleapi.ErrorItem{
Reason: "teamDriveFileLimitExceeded",
}
generic403.Errors[0] = tDFLEItem
expectedTDFLError := fserrors.FatalError(&generic403)
teamDriveFileLimitRetry, teamDriveFileLimitError := f.shouldRetry(ctx, &generic403)
assert.False(t, teamDriveFileLimitRetry)
assert.Equal(t, teamDriveFileLimitError, expectedTDFLError)
qEItem := googleapi.ErrorItem{
Reason: "quotaExceeded",
}
generic403.Errors[0] = qEItem
expectedQuotaError := fserrors.FatalError(&generic403)
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, quotaExceededRetry)
assert.Equal(t, quotaExceededError, expectedQuotaError)
sqEItem := googleapi.ErrorItem{
Reason: "storageQuotaExceeded",
}
generic403.Errors[0] = sqEItem
expectedStorageQuotaError := fserrors.FatalError(&generic403)
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, storageQuotaExceededRetry)
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
}
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
oldAllow := f.opt.AllowImportNameChange
f.opt.AllowImportNameChange = true
@@ -375,9 +442,9 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
// Make some objects, one in a subdir
contents := random.String(100)
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
_, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
_, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
_ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
// Check objects
checkObjects := func() {
@@ -419,11 +486,7 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
require.NoError(t, err)
o := obj.(*Object)
dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(dir)
}()
dir := t.TempDir()
checkFile := func(name string) {
filePath := filepath.Join(dir, name)
@@ -461,6 +524,76 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
})
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
opt := &filter.Opt{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)
flt, err := filter.NewFilter(opt)
assert.NoError(t, err)
defCtx := context.Background()
fltCtx := filter.ReplaceConfig(defCtx, flt)
testCtx1 := fltCtx
testCtx2 := filter.SetUseFilter(testCtx1, true)
testCtx3, testCancel := context.WithCancel(testCtx2)
testCtx4 := filter.SetUseFilter(testCtx3, false)
testCancel()
assert.False(t, filter.GetUseFilter(testCtx1))
assert.True(t, filter.GetUseFilter(testCtx2))
assert.True(t, filter.GetUseFilter(testCtx3))
assert.False(t, filter.GetUseFilter(testCtx4))
subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), "agequery-testdir")
subFsResult, err := fs.NewFs(defCtx, subRemote)
require.NoError(t, err)
subFs, isDriveFs := subFsResult.(*Fs)
require.True(t, isDriveFs)
tempDir1 := t.TempDir()
tempFs1, err := fs.NewFs(defCtx, tempDir1)
require.NoError(t, err)
tempDir2 := t.TempDir()
tempFs2, err := fs.NewFs(defCtx, tempDir2)
require.NoError(t, err)
file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
_ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
// validate sync/copy
const timeQuery = "(modifiedTime >= '"
assert.NoError(t, sync.CopyDir(defCtx, subFs, tempFs1, false))
assert.NotContains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, sync.CopyDir(fltCtx, subFs, tempFs1, false))
assert.Contains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, sync.CopyDir(fltCtx, tempFs2, subFs, false))
assert.Contains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, sync.CopyDir(defCtx, tempFs2, subFs, false))
assert.NotContains(t, subFs.lastQuery, timeQuery)
// validate list/walk
devNull, errOpen := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
require.NoError(t, errOpen)
defer func() {
_ = devNull.Close()
}()
assert.NoError(t, operations.List(defCtx, subFs, devNull))
assert.NotContains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, operations.List(fltCtx, subFs, devNull))
assert.Contains(t, subFs.lastQuery, timeQuery)
}
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
@@ -478,6 +611,8 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
t.Run("AgeQuery", f.InternalTestAgeQuery)
t.Run("ShouldRetry", f.InternalTestShouldRetry)
}
var _ fstests.InternalTester = (*Fs)(nil)

backend/dropbox/batcher.go (346 changed lines), Normal file

@@ -0,0 +1,346 @@
// This file contains the implementation of the sync batcher for uploads
//
// Dropbox rules say you can start as many batches as you want, but
// you may only have one batch being committed and must wait for the
// batch to be finished before committing another.
package dropbox
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)
const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (async)
defaultBatchSizeAsync = 100 // default batch size if async
)
// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}
// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}
// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}
// Send this to get the engine to quit
var quitRequest = batcherRequest{}
// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}
// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil
}
// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}
// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, fmt.Errorf("batch commit failed: %w", err)
}
return complete, nil
}
// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)
// finalise the batch getting either a result or a job id to poll
complete, err := b.finishBatch(ctx, items)
if err != nil {
return err
}
// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {
return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}
// Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results {
item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" {
resp.entry = item.Success
} else {
errorCount++
errorTag = item.Tag
if item.Failure != nil {
errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil {
errorTag += "/" + item.Failure.LookupFailed.Tag
}
if item.Failure.Path != nil {
errorTag += "/" + item.Failure.Path.Tag
}
if item.Failure.PropertiesError != nil {
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
}
}
// Show signalled so no need to report error to clients from now on
signalled = true
// Report an error if any failed in the batch
if errorTag != "" {
return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)
return nil
}
// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()
outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}
}
// commit any remaining items
if len(items) > 0 {
commit()
}
}
// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
if !b.Batching() {
return
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}
// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}
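
The commit loop above follows a common Go batching pattern: callers hand a request plus a reply channel to a single goroutine, which flushes the accumulated batch when it is full or has been idle for the configured timeout. A condensed standalone sketch of that pattern follows; the names, sizes and timings are invented for illustration and this is not the Dropbox batcher itself.

package main

import (
	"fmt"
	"sync"
	"time"
)

type request struct {
	payload string
	reply   chan error
}

// commitLoop accumulates requests and flushes them when the batch is full or
// when no new request has arrived for the idle timeout.
func commitLoop(in <-chan request, size int, timeout time.Duration) {
	var batch []request
	idle := time.NewTimer(timeout)
	idle.Stop()
	flush := func() {
		fmt.Printf("committing batch of %d\n", len(batch))
		for _, r := range batch {
			r.reply <- nil // report the per-item result to the waiting caller
		}
		batch = nil
	}
	for {
		select {
		case r, ok := <-in:
			if !ok { // channel closed: flush what is left and exit
				if len(batch) > 0 {
					flush()
				}
				return
			}
			batch = append(batch, r)
			idle.Stop()
			if len(batch) >= size {
				flush()
			} else {
				idle.Reset(timeout)
			}
		case <-idle.C:
			if len(batch) > 0 {
				flush()
			}
		}
	}
}

func main() {
	in := make(chan request, 3)
	done := make(chan struct{})
	go func() { commitLoop(in, 3, 100*time.Millisecond); close(done) }()

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r := request{payload: fmt.Sprint(i), reply: make(chan error, 1)}
			in <- r
			<-r.reply // a synchronous caller waits for its item to be committed
		}(i)
	}
	wg.Wait()
	close(in)
	<-done
}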

backend/dropbox/dropbox.go (406 changed lines), Executable file → Normal file

@@ -23,23 +23,22 @@ of path_display and all will be well.
import (
"context"
"errors"
"fmt"
"io"
"log"
"path"
"regexp"
"strings"
"time"
"unicode/utf8"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
"github.com/pkg/errors"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users"
"github.com/rclone/rclone/backend/dropbox/dbhash"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -65,9 +64,9 @@ const (
// Upload chunk size - setting too small makes uploads slow.
// Chunks are buffered into memory for retries.
//
// Speed vs chunk size uploading a 1 GB file on 2017-11-22
// Speed vs chunk size uploading a 1 GiB file on 2017-11-22
//
// Chunk Size MB, Speed Mbyte/s, % of max
// Chunk Size MiB, Speed MiB/s, % of max
// 1 1.364 11%
// 2 2.443 19%
// 4 4.288 33%
@@ -82,11 +81,11 @@ const (
// 96 12.302 95%
// 128 12.945 100%
//
// Choose 48MB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48MB = 192MB
// Choose 48 MiB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48 MiB = 192 MiB
// by default.
defaultChunkSize = 48 * fs.MebiByte
maxChunkSize = 150 * fs.MebiByte
defaultChunkSize = 48 * fs.Mebi
maxChunkSize = 150 * fs.Mebi
// Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
maxFileNameLength = 255
)
@@ -139,32 +138,29 @@ func getOauthConfig(m configmap.Mapper) *oauth2.Config {
// Register with Fs
func init() {
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
DbHashType = hash.RegisterHash("dropbox", "DropboxHash", 64, dbhash.New)
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
opt := oauthutil.Options{
NoOffline: true,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: getOauthConfig(m),
NoOffline: true,
OAuth2Opts: []oauth2.AuthCodeOption{
oauth2.SetAuthURLParam("token_access_type", "offline"),
},
}
err := oauthutil.Config(ctx, "dropbox", name, m, getOauthConfig(m), &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size. (< %v).
Help: fmt.Sprintf(`Upload chunk size (< %v).
Any files larger than this will be uploaded in chunks of this size.
Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
slightly (at most 10%% for 128 MiB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Default: defaultChunkSize,
Advanced: true,
@@ -213,6 +209,68 @@ Note that we don't unmount the shared folder afterwards so the
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: "batch_mode",
Help: `Upload file batching sync|async|off.
This sets the batch mode used by rclone.
For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
This has 3 possible values
- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion
Rclone will close any outstanding batches when it exits which may make
a delay on quit.
`,
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: `Max number of files in upload batch.
This sets the batch size of files to upload. It has to be less than 1000.
By default this is 0 which means rclone will calculate the batch size
depending on the setting of batch_mode.
- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use
Rclone will close any outstanding batches when it exits which may make
a delay on quit.
Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading.
If an upload batch is idle for more than this long then it will be
uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -232,11 +290,16 @@ shared folder.`,
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote dropbox server
@@ -255,6 +318,7 @@ type Fs struct {
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
batcher *batcher // batch builder
}
// Object describes a dropbox object
@@ -270,8 +334,6 @@ type Object struct {
hash string // content_hash of the object
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
@@ -301,36 +363,36 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
if err == nil {
return false, err
}
baseErrString := errors.Cause(err).Error()
errString := err.Error()
// First check for specific errors
if strings.Contains(baseErrString, "insufficient_space") {
if strings.Contains(errString, "insufficient_space") {
return false, fserrors.FatalError(err)
} else if strings.Contains(baseErrString, "malformed_path") {
} else if strings.Contains(errString, "malformed_path") {
return false, fserrors.NoRetryError(err)
}
// Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
}
// Keep old behavior for backward compatibility
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
return true, err
}
return fserrors.ShouldRetry(err), err
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.Byte
const minChunkSize = fs.SizeSuffixBase
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
@@ -353,7 +415,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "dropbox: chunk size")
return nil, fmt.Errorf("dropbox: chunk size: %w", err)
}
// Convert the old token if it exists. The old token was just
@@ -365,13 +427,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
if err != nil {
return nil, errors.Wrap(err, "NewFS convert token")
return nil, fmt.Errorf("NewFS convert token: %w", err)
}
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m))
if err != nil {
return nil, errors.Wrap(err, "failed to configure dropbox")
return nil, fmt.Errorf("failed to configure dropbox: %w", err)
}
ci := fs.GetConfig(ctx)
@@ -382,6 +444,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
return nil, err
}
cfg := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe???
@@ -406,9 +472,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
args := team.NewMembersGetInfoArgs(members)
memberIds, err := f.team.MembersGetInfo(args)
if err != nil {
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
}
if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
}
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
@@ -485,7 +553,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "get current account failed")
return nil, fmt.Errorf("get current account failed: %w", err)
}
switch x := acc.RootInfo.(type) {
case *common.TeamRootInfo:
@@ -493,28 +561,30 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case *common.UserRootInfo:
f.ns = x.RootNamespaceId
default:
return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
return nil, fmt.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
}
fs.Debugf(f, "Using root namespace %q", f.ns)
}
f.setRoot(root)
// See if the root is actually an object
_, err = f.getFileMetadata(ctx, f.slashRoot)
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
if f.root != "" {
_, err = f.getFileMetadata(ctx, f.slashRoot)
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// headerGenerator for dropbox sdk
func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string {
func (f *Fs) headerGenerator(hostType string, namespace string, route string) map[string]string {
if f.ns == "" {
return map[string]string{}
}
@@ -564,6 +634,9 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *fi
}
fileInfo, ok := entry.(*files.FileMetadata)
if !ok {
if _, ok = entry.(*files.FolderMetadata); ok {
return nil, fs.ErrorIsDir
}
return nil, fs.ErrorNotAFile
}
return fileInfo, nil
@@ -641,7 +714,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -715,7 +788,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -725,7 +798,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
fs: f,
url: entry.PreviewUrl,
remote: entryPath,
modTime: entry.TimeInvited,
modTime: *entry.TimeInvited,
}
if err != nil {
return nil, err
@@ -781,6 +854,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(root),
Recursive: false,
Limit: 1000,
}
if root == "/" {
arg.Path = "" // Specify root folder as empty string
@@ -808,7 +882,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -851,7 +925,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -920,7 +994,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
// check directory exists
_, err = f.getDirMetadata(ctx, root)
if err != nil {
return errors.Wrap(err, "Rmdir")
return fmt.Errorf("Rmdir: %w", err)
}
root = f.opt.Enc.FromStandardPath(root)
@@ -938,7 +1012,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
return fmt.Errorf("Rmdir: %w", err)
}
if len(res.Entries) != 0 {
return errors.New("directory not empty")
@@ -970,9 +1044,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1004,7 +1078,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "copy failed")
return nil, fmt.Errorf("copy failed: %w", err)
}
// Set the metadata
@@ -1014,7 +1088,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err = dstObj.setMetadataFromEntry(fileInfo)
if err != nil {
return nil, errors.Wrap(err, "copy failed")
return nil, fmt.Errorf("copy failed: %w", err)
}
return dstObj, nil
@@ -1031,9 +1105,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1065,7 +1139,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "move failed")
return nil, fmt.Errorf("move failed: %w", err)
}
// Set the metadata
@@ -1075,7 +1149,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err = dstObj.setMetadataFromEntry(fileInfo)
if err != nil {
return nil, errors.Wrap(err, "move failed")
return nil, fmt.Errorf("move failed: %w", err)
}
return dstObj, nil
}
@@ -1100,14 +1174,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
}
if expire < fs.DurationOff {
expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
createArg.Settings.Expires = expiryTime
}
// FIXME note we can't set Settings for non enterprise dropbox
// because of https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75
// however this only goes wrong when we set Expires, so as a
// work-around remove Settings unless expire is set.
if expire == fs.DurationOff {
createArg.Settings = nil
createArg.Settings.Expires = &expiryTime
}
var linkRes sharing.IsSharedLinkMetadata
@@ -1132,7 +1199,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return
}
if len(listRes.Links) == 0 {
err = errors.New("Dropbox says the sharing link already exists, but list came back empty")
err = errors.New("sharing link already exists, but list came back empty")
return
}
linkRes = listRes.Links[0]
@@ -1144,7 +1211,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
case *sharing.FolderLinkMetadata:
link = res.Url
default:
err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res)
err = fmt.Errorf("don't know how to extract link, response has unknown format: %T", res)
}
}
return
@@ -1190,7 +1257,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "MoveDir failed")
return fmt.Errorf("MoveDir failed: %w", err)
}
return nil
@@ -1204,7 +1271,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
return nil, err
}
var total uint64
if q.Allocation != nil {
@@ -1305,10 +1372,12 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
if timeout < 30 {
timeout = 30
fs.Debugf(f, "Increasing poll interval to minimum 30s")
}
if timeout > 480 {
timeout = 480
fs.Debugf(f, "Decreasing poll interval to maximum 480s")
}
err = f.pacer.Call(func() (bool, error) {
@@ -1344,7 +1413,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
return shouldRetry(ctx, err)
})
if err != nil {
return "", errors.Wrap(err, "list continue")
return "", fmt.Errorf("list continue: %w", err)
}
cursor = changeList.Cursor
var entryType fs.EntryType
@@ -1366,7 +1435,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
}
if entryPath != "" {
notifyFunc(entryPath, entryType)
notifyFunc(f.opt.Enc.ToStandardPath(entryPath), entryType)
}
}
if !changeList.HasMore {
@@ -1381,6 +1450,13 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(DbHashType)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
f.batcher.Shutdown()
return nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -1416,7 +1492,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
}
err := o.readMetaData(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to read hash from metadata")
return "", fmt.Errorf("failed to read hash from metadata: %w", err)
}
return o.hash, nil
}
@@ -1540,97 +1616,110 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// uploadChunked uploads the object in parts
//
// Will work optimally if size is >= uploadChunkSize. If the size is either
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
// Will introduce two additional network requests to start and finish the session.
// If the size is unknown (i.e. -1) the method incurs one additional
// request to the Dropbox API that does not carry a payload to close the append session.
func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
chunkSize := int64(o.fs.opt.ChunkSize)
chunks := 0
if size != -1 {
chunks = int(size/chunkSize) + 1
}
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))
fmtChunk := func(cur int, last bool) {
if chunks == 0 && last {
fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
} else if chunks == 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", cur)
} else {
fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
}
}
// write the first chunk
fmtChunk(1, false)
// start upload
var res *files.UploadSessionStartResult
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, nil)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
chunkSize := int64(o.fs.opt.ChunkSize)
chunks, remainder := size/chunkSize, size%chunkSize
if remainder > 0 {
chunks++
}
// write chunks
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))
cursor := files.UploadSessionCursor{
SessionId: res.SessionId,
Offset: 0,
}
appendArg := files.UploadSessionAppendArg{
Cursor: &cursor,
Close: false,
}
// write more whole chunks (if any)
currentChunk := 2
for {
if chunks > 0 && currentChunk >= chunks {
// if the size is known, only upload full chunks. Remaining bytes are uploaded with
// the UploadSessionFinish request.
break
} else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
// if the size is unknown, upload as long as we can read full chunks from the reader.
// The UploadSessionFinish request will not contain any payload.
break
}
appendArg := files.UploadSessionAppendArg{Cursor: &cursor}
for currentChunk := 1; ; currentChunk++ {
cursor.Offset = in.BytesRead()
fmtChunk(currentChunk, false)
chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
if chunks < 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", currentChunk)
} else {
fs.Debugf(o, "Uploading chunk %d/%d", currentChunk, chunks)
}
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
skip := int64(0)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
if _, err = chunk.Seek(skip, io.SeekStart); err != nil {
return false, err
}
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
// after the first chunk is uploaded, we retry everything
// after session is started, we retry everything
if err != nil {
// Check for incorrect offset error and retry with new offset
if uErr, ok := err.(files.UploadSessionAppendV2APIError); ok {
if uErr.EndpointError != nil && uErr.EndpointError.IncorrectOffset != nil {
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
delta := int64(correctOffset) - int64(cursor.Offset)
skip += delta
what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
if skip < 0 {
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
} else if skip == chunkSize {
fs.Debugf(o, "%s: chunk received OK - continuing", what)
return false, nil
} else if skip > chunkSize {
// This error should never happen
return false, fmt.Errorf("can't seek forwards by more than a chunk to correct offset: %s", what)
}
// Skip the sent data on next retry
cursor.Offset = uint64(int64(cursor.Offset) + delta)
fs.Debugf(o, "%s: skipping bytes on retry to fix offset", what)
}
}
}
return err != nil, err
})
if err != nil {
return nil, err
}
currentChunk++
if appendArg.Close {
break
}
if size > 0 {
// if size is known, check if next chunk is final
appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
if in.BytesRead() > uint64(size) {
return nil, fmt.Errorf("expected %d bytes in input, but have read %d so far", size, in.BytesRead())
}
} else {
// if size is unknown, upload as long as we can read full chunks from the reader
appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
}
}
// write the remains
// finish upload
cursor.Offset = in.BytesRead()
args := &files.UploadSessionFinishArg{
Cursor: &cursor,
Commit: commitInfo,
}
fmtChunk(currentChunk, true)
chunk = readers.NewRepeatableReaderBuffer(in, buf)
// If we are batching then we should have written all the data now
// store the commit info now for a batch commit
if o.fs.batcher.Batching() {
return o.fs.batcher.Commit(ctx, args)
}
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
entry, err = o.fs.srv.UploadSessionFinish(args, nil)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -1674,7 +1763,7 @@ func checkPathLength(name string) (err error) {
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
@@ -1683,12 +1772,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
return fserrors.NoRetryError(fmt.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
}
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision.
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
clientModified := src.ModTime(ctx).UTC().Round(time.Second)
commitInfo.ClientModified = &clientModified
// Don't attempt to create filenames that are too long
if cErr := checkPathLength(commitInfo.Path); cErr != nil {
return cErr
@@ -1697,16 +1787,25 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
var err error
var entry *files.FileMetadata
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
if size > int64(o.fs.opt.ChunkSize) || size < 0 || o.fs.batcher.Batching() {
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
entry, err = o.fs.srv.Upload(commitInfo, in)
entry, err = o.fs.srv.Upload(&files.UploadArg{CommitInfo: *commitInfo}, in)
return shouldRetry(ctx, err)
})
}
if err != nil {
return errors.Wrap(err, "upload failed")
return fmt.Errorf("upload failed: %w", err)
}
// If we haven't received data back from batch upload then fake it
//
// This will only happen if we are uploading async batches
if entry == nil {
o.bytes = size
o.modTime = *commitInfo.ClientModified
o.hash = "" // we don't have this
return nil
}
return o.setMetadataFromEntry(entry)
}
@@ -1735,6 +1834,7 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = &Fs{}
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
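
One detail worth calling out from the dropbox.go changes above is the incorrect-offset recovery in uploadChunked: when an append fails, the server may report the offset it actually holds, and the client skips the bytes that already arrived before resending the chunk. A tiny standalone sketch of that bookkeeping, with hypothetical numbers and not the rclone code:

package main

import "fmt"

// recoverOffset works out how many bytes of the current chunk the server
// already has, given the offset we sent from and the offset it reports back.
func recoverOffset(sentOffset, correctOffset, chunkSize int64) (skip int64, err error) {
	skip = correctOffset - sentOffset
	switch {
	case skip < 0:
		return 0, fmt.Errorf("can't seek backwards: server wants %d, we are at %d", correctOffset, sentOffset)
	case skip > chunkSize:
		return 0, fmt.Errorf("can't skip forwards by more than a chunk: server wants %d, we are at %d", correctOffset, sentOffset)
	}
	// skip == chunkSize means the whole chunk arrived; otherwise re-send the
	// chunk starting skip bytes in.
	return skip, nil
}

func main() {
	// The server already received 1 MiB of the 48 MiB chunk sent from offset 0.
	skip, err := recoverOffset(0, 1<<20, 48<<20)
	fmt.Println(skip, err) // 1048576 <nil>
}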


@@ -2,14 +2,16 @@ package fichier
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
@@ -26,25 +28,44 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded
}
var errorRegex = regexp.MustCompile(`#\d{1,3}`)
func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 {
return 0
}
code, err := strconv.Atoi(strings.TrimPrefix(matches[0], "#"))
if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0
}
return code
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// Detect this error which the integration tests provoke
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
// 1Fichier uses HTTP error code 403 (Forbidden) for all kinds of errors with
// responses looking like this: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
// https://1fichier.com/api.html
//
// file/ls.cgi is limited :
//
// Warning (can be changed in case of abuses) :
// List all files of the account is limited to 1 request per hour.
// List folders is limited to 5 000 results and 1 request per folder per 30s.
if err != nil && strings.Contains(err.Error(), "Flood detected") {
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
// We attempt to parse the actual 1Fichier error code from this body and handle it accordingly
// Most importantly #374 (Flood detected: IP locked) which the integration tests provoke
// The list below is far from complete and should be expanded if we see any more error codes.
if err != nil {
switch parseFichierError(err) {
case 93:
return false, err // No such user
case 186:
return false, err // IP blocked?
case 374:
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
default:
}
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
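As a standalone illustration (not part of the change itself), the parsing above turns the flood-detection body quoted in the comment into the numeric code 374, which is the case that triggers the 30 second sleep. A minimal sketch, assuming the capture-group form of the regexp used above:

package main

import (
	"errors"
	"fmt"
	"regexp"
	"strconv"
)

// errorRegex and parseCode mirror errorRegex/parseFichierError above, for demonstration only.
var errorRegex = regexp.MustCompile(`#(\d{1,3})`)

func parseCode(err error) int {
	matches := errorRegex.FindStringSubmatch(err.Error())
	if len(matches) < 2 {
		return 0
	}
	code, convErr := strconv.Atoi(matches[1])
	if convErr != nil {
		return 0
	}
	return code
}

func main() {
	err := errors.New(`HTTP error 403 (403 Forbidden) returned body: {"message":"Flood detected: IP Locked #374","status":"KO"}`)
	fmt.Println(parseCode(err)) // prints 374
}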
@@ -80,16 +101,22 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read file info")
return nil, fmt.Errorf("couldn't read file info: %w", err)
}
return &file, err
}
// maybe do some actual validation later if necessary
func validToken(token *GetTokenResponse) bool {
return token.Status == "OK"
}
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
Pass: f.opt.FilePassword,
}
opts := rest.Opts{
Method: "POST",
@@ -99,10 +126,11 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
return shouldRetry(ctx, resp, err)
doretry, err := shouldRetry(ctx, resp, err)
return doretry || !validToken(&token), err
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
return nil, fmt.Errorf("couldn't list files: %w", err)
}
return &token, nil
@@ -118,10 +146,16 @@ func fileFromSharedFile(file *SharedFile) File {
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
ContentType: "application/x-www-form-urlencoded",
}
if f.opt.FolderPassword != "" {
opts.Method = "POST"
opts.Parameters = nil
opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword))
}
var sharedFiles SharedFolderResponse
@@ -130,7 +164,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
return nil, fmt.Errorf("couldn't list files: %w", err)
}
entries = make([]fs.DirEntry, len(sharedFiles))
@@ -159,7 +193,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
return nil, fmt.Errorf("couldn't list files: %w", err)
}
for i := range filesList.Items {
item := &filesList.Items[i]
@@ -187,7 +221,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
return nil, fmt.Errorf("couldn't list folders: %w", err)
}
foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
for i := range foldersList.SubFolders {
@@ -281,7 +315,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
return nil, fmt.Errorf("couldn't create folder: %w", err)
}
// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
@@ -308,10 +342,10 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
return nil, fmt.Errorf("couldn't remove folder: %w", err)
}
if response.Status != "OK" {
return nil, errors.New("Can't remove non-empty dir")
return nil, fmt.Errorf("can't remove folder: %s", response.Message)
}
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -338,7 +372,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove file")
return nil, fmt.Errorf("couldn't remove file: %w", err)
}
// fs.Debugf(f, "Removed file with url `%s`", url)
@@ -365,7 +399,7 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
})
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
return nil, fmt.Errorf("couldn't copy file: %w", err)
}
return response, nil
@@ -390,7 +424,35 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
})
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
return nil, fmt.Errorf("couldn't copy file: %w", err)
}
return response, nil
}
func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) {
request := &RenameFileRequest{
URLs: []RenameFileURL{
{
URL: url,
Filename: newName,
},
},
}
opts := rest.Opts{
Method: "POST",
Path: "/file/rename.cgi",
}
response = &RenameFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't rename file: %w", err)
}
return response, nil
@@ -411,7 +473,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node")
return nil, fmt.Errorf("didnt got an upload node: %w", err)
}
// fs.Debugf(f, "Got Upload node")
@@ -425,7 +487,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
fileName = f.opt.Enc.FromStandardName(fileName)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
return nil, errors.New("invalid UploadID")
}
opts := rest.Opts{
@@ -455,7 +517,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
})
if err != nil {
return nil, errors.Wrap(err, "couldn't upload file")
return nil, fmt.Errorf("couldn't upload file: %w", err)
}
// fs.Debugf(f, "Uploaded File `%s`", fileName)
@@ -467,7 +529,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
return nil, errors.New("invalid UploadID")
}
opts := rest.Opts{
@@ -489,7 +551,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
})
if err != nil {
return nil, errors.Wrap(err, "couldn't finish file upload")
return nil, fmt.Errorf("couldn't finish file upload: %w", err)
}
return response, err


@@ -1,7 +1,9 @@
// Package fichier provides an interface to the 1Fichier storage system.
package fichier
import (
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -9,7 +11,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -35,17 +36,24 @@ func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
Config: func(ctx context.Context, name string, config configmap.Mapper) {
},
NewFs: NewFs,
NewFs: NewFs,
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key",
}, {
Help: "If you want to download a shared folder, add this parameter",
Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Help: "If you want to download a shared file that is password protected, add this parameter.",
Name: "file_password",
Advanced: true,
IsPassword: true,
}, {
Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
Name: "folder_password",
Advanced: true,
IsPassword: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -77,9 +85,11 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
Enc encoder.MultiEncoder `config:"encoding"`
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs is the interface a cloud storage system must provide
@@ -285,7 +295,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
path, ok := f.dirCache.GetInv(directoryID)
if !ok {
return nil, errors.New("Cannot find dir in dircache")
return nil, errors.New("cannot find dir in dircache")
}
return f.newObjectFromFile(ctx, path, file), nil
@@ -425,25 +435,45 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}
// Find current directory ID
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
return nil, err
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
// If it is in the correct directory, just rename it
var url string
if currentDirectoryID == directoryID {
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if err != nil {
return nil, fmt.Errorf("couldn't rename file: %w", err)
}
if resp.Status != "OK" {
return nil, fmt.Errorf("couldn't rename file: %s", resp.Message)
}
url = resp.URLs[0].URL
} else {
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, fmt.Errorf("couldn't move file: %w", err)
}
if resp.Status != "OK" {
return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
}
url = resp.URLs[0]
}
file, err := f.readFileInfo(ctx, resp.URLs[0])
file, err := f.readFileInfo(ctx, url)
if err != nil {
return nil, errors.New("couldn't read file data")
}
@@ -471,10 +501,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
return nil, fmt.Errorf("couldn't move file: %w", err)
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
}
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
@@ -485,6 +515,32 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/user/info.cgi",
ContentType: "application/json",
}
var accountInfo AccountInfo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, nil, &accountInfo)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to read user info: %w", err)
}
// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
Used: fs.NewUsageValue(accountInfo.ColdStorage), // bytes in use
Total: fs.NewUsageValue(accountInfo.AvailableColdStorage), // bytes total
Free: fs.NewUsageValue(accountInfo.AvailableColdStorage - accountInfo.ColdStorage), // bytes free
}
return usage, nil
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
o, err := f.NewObject(ctx, remote)


@@ -2,11 +2,12 @@ package fichier
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
@@ -122,7 +123,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Delete duplicate after successful upload
err = o.Remove(ctx)
if err != nil {
return errors.Wrap(err, "failed to remove old version")
return fmt.Errorf("failed to remove old version: %w", err)
}
// Replace guts of old object with new one


@@ -19,6 +19,7 @@ type ListFilesRequest struct {
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
Pass string `json:"pass,omitempty"`
}
// RemoveFolderRequest is the request structure of the corresponding request
@@ -63,8 +64,9 @@ type MoveFileRequest struct {
// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
Status string `json:"status"`
URLs []string `json:"urls"`
Status string `json:"status"`
Message string `json:"message"`
URLs []string `json:"urls"`
}
// CopyFileRequest is the request structure of the corresponding request
@@ -76,17 +78,42 @@ type CopyFileRequest struct {
// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
Status string `json:"status"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
Status string `json:"status"`
Message string `json:"message"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
}
// FileCopy is used in the the CopyFileResponse
// FileCopy is used in the CopyFileResponse
type FileCopy struct {
FromURL string `json:"from_url"`
ToURL string `json:"to_url"`
}
// RenameFileURL is the data structure to rename a single file
type RenameFileURL struct {
URL string `json:"url"`
Filename string `json:"filename"`
}
// RenameFileRequest is the request structure of the corresponding request
type RenameFileRequest struct {
URLs []RenameFileURL `json:"urls"`
Pretty int `json:"pretty"`
}
// RenameFileResponse is the response structure of the corresponding request
type RenameFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Renamed int `json:"renamed"`
URLs []struct {
URL string `json:"url"`
OldFilename string `json:"old_filename"`
NewFilename string `json:"new_filename"`
} `json:"urls"`
}
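For reference, a single-file rename request built from the structures above marshals to the JSON shown in the output comment below. This is only a sketch; the URL and filename are made-up examples:

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified copies of the rename request types defined above.
type RenameFileURL struct {
	URL      string `json:"url"`
	Filename string `json:"filename"`
}

type RenameFileRequest struct {
	URLs   []RenameFileURL `json:"urls"`
	Pretty int             `json:"pretty"`
}

func main() {
	req := RenameFileRequest{
		URLs: []RenameFileURL{{URL: "https://1fichier.com/?exampleid", Filename: "new-name.txt"}},
	}
	out, _ := json.Marshal(req)
	fmt.Println(string(out))
	// {"urls":[{"url":"https://1fichier.com/?exampleid","filename":"new-name.txt"}],"pretty":0}
}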
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`
@@ -155,3 +182,34 @@ type FoldersList struct {
Status string `json:"Status"`
SubFolders []Folder `json:"sub_folders"`
}
// AccountInfo is the structure in which 1Fichier returns user info
type AccountInfo struct {
StatsDate string `json:"stats_date"`
MailRM string `json:"mail_rm"`
DefaultQuota int64 `json:"default_quota"`
UploadForbidden string `json:"upload_forbidden"`
PageLimit int `json:"page_limit"`
ColdStorage int64 `json:"cold_storage"`
Status string `json:"status"`
UseCDN string `json:"use_cdn"`
AvailableColdStorage int64 `json:"available_cold_storage"`
DefaultPort string `json:"default_port"`
DefaultDomain int `json:"default_domain"`
Email string `json:"email"`
DownloadMenu string `json:"download_menu"`
FTPDID int `json:"ftp_did"`
DefaultPortFiles string `json:"default_port_files"`
FTPReport string `json:"ftp_report"`
OverQuota int64 `json:"overquota"`
AvailableStorage int64 `json:"available_storage"`
CDN string `json:"cdn"`
Offer string `json:"offer"`
SubscriptionEnd string `json:"subscription_end"`
TFA string `json:"2fa"`
AllowedColdStorage int64 `json:"allowed_cold_storage"`
HotStorage int64 `json:"hot_storage"`
DefaultColdStorageQuota int64 `json:"default_cold_storage_quota"`
FTPMode string `json:"ftp_mode"`
RUReport string `json:"ru_report"`
}


@@ -5,6 +5,7 @@ package api
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strings"
@@ -18,7 +19,7 @@ const (
timeFormatJSON = `"` + timeFormatParameters + `"`
)
// Time represents represents date and time information for the
// Time represents date and time information for the
// filefabric API
type Time time.Time
@@ -51,15 +52,50 @@ func (t Time) String() string {
return time.Time(t).UTC().Format(timeFormatParameters)
}
// Int represents an integer which can be represented in JSON as a
// quoted integer or an integer.
type Int int
// MarshalJSON turns an Int into JSON
func (i *Int) MarshalJSON() (out []byte, err error) {
return json.Marshal((*int)(i))
}
// UnmarshalJSON turns JSON into an Int
func (i *Int) UnmarshalJSON(data []byte) error {
if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
data = data[1 : len(data)-1]
}
return json.Unmarshal(data, (*int)(i))
}
// String represents a string which can be represented in JSON as a
// quoted string or an integer.
type String string
// MarshalJSON turns a String into JSON
func (s *String) MarshalJSON() (out []byte, err error) {
return json.Marshal((*string)(s))
}
// UnmarshalJSON turns JSON into a String
func (s *String) UnmarshalJSON(data []byte) error {
err := json.Unmarshal(data, (*string)(s))
if err != nil {
*s = String(data)
}
return nil
}
// Status is returned in all status responses
type Status struct {
Code string `json:"status"`
Message string `json:"statusmessage"`
TaskID string `json:"taskid"`
TaskID String `json:"taskid"`
// Warning string `json:"warning"` // obsolete
}
// Status statisfies the error interface
// Status satisfies the error interface
func (e *Status) Error() string {
return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}
@@ -115,7 +151,7 @@ type GetFolderContentsResponse struct {
Total int `json:"total,string"`
Items []Item `json:"filelist"`
Folder Item `json:"folder"`
From int `json:"from,string"`
From Int `json:"from"`
//Count int `json:"count"`
Pid string `json:"pid"`
RefreshResult Status `json:"refreshresult"`
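A standalone sketch (not part of the change) of why the flexible Int and String types above are needed: the file fabric API sometimes returns fields such as taskid and from quoted and sometimes as bare numbers, and both forms should decode into the same Go struct. The sample values 17 and 5 are illustrative only:

package main

import (
	"encoding/json"
	"fmt"
)

// Int and String are simplified copies of the flexible types defined above.
type Int int

func (i *Int) UnmarshalJSON(data []byte) error {
	if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
		data = data[1 : len(data)-1]
	}
	return json.Unmarshal(data, (*int)(i))
}

type String string

func (s *String) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, (*string)(s)); err != nil {
		*s = String(data)
	}
	return nil
}

// status mimics the taskid/from fields changed in this diff.
type status struct {
	TaskID String `json:"taskid"`
	From   Int    `json:"from"`
}

func main() {
	var a, b status
	_ = json.Unmarshal([]byte(`{"taskid":"17","from":"5"}`), &a) // quoted values
	_ = json.Unmarshal([]byte(`{"taskid":17,"from":5}`), &b)     // bare numbers
	fmt.Println(a.TaskID, a.From, b.TaskID, b.From)              // 17 5 17 5
}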


@@ -17,9 +17,9 @@ import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -32,7 +32,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/random"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/filefabric/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -65,7 +64,7 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "url",
Help: "URL of the Enterprise File Fabric to connect to",
Help: "URL of the Enterprise File Fabric to connect to.",
Required: true,
Examples: []fs.OptionExample{{
Value: "https://storagemadeeasy.com",
@@ -79,14 +78,15 @@ func init() {
}},
}, {
Name: "root_folder_id",
Help: `ID of the root folder
Help: `ID of the root folder.
Leave blank normally.
Fill in to make rclone start with directory of a given ID.
`,
}, {
Name: "permanent_token",
Help: `Permanent Authentication Token
Help: `Permanent Authentication Token.
A Permanent Authentication Token can be created in the Enterprise File
Fabric, on the users Dashboard under Security, there is an entry
@@ -99,7 +99,7 @@ For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`,
}, {
Name: "token",
Help: `Session Token
Help: `Session Token.
This is a session token which rclone caches in the config file. It is
usually valid for 1 hour.
@@ -109,14 +109,14 @@ Don't set this value - rclone will set it automatically.
Advanced: true,
}, {
Name: "token_expiry",
Help: `Token expiry time
Help: `Token expiry time.
Don't set this value - rclone will set it automatically.
`,
Advanced: true,
}, {
Name: "version",
Help: `Version read from the file fabric
Help: `Version read from the file fabric.
Don't set this value - rclone will set it automatically.
`,
@@ -149,7 +149,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
m configmap.Mapper // to save config
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenMu sync.Mutex // hold when reading the token
@@ -222,13 +222,14 @@ var retryStatusCodes = []struct {
// delete in that folder. Please try again later or use
// another name. (error_background)
code: "error_background",
sleep: 6 * time.Second,
sleep: 1 * time.Second,
},
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError) (bool, error) {
// try should be the number of the tries so far, counting up from 1
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError, try int) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
@@ -244,9 +245,10 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
for _, retryCode := range retryStatusCodes {
if code == retryCode.code {
if retryCode.sleep > 0 {
// make this thread only sleep extra time
fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", retryCode.sleep, retryCode.code)
time.Sleep(retryCode.sleep)
// make this thread only sleep exponentially increasing extra time
sleepTime := retryCode.sleep << (try - 1)
fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", sleepTime, retryCode.code)
time.Sleep(sleepTime)
}
return true, err
}
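For clarity, the retry sleep above now grows exponentially with the try number rather than staying fixed. A minimal sketch of the arithmetic, assuming the 1 second base configured for the "error_background" code in the table above (the try counts are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	base := time.Second // sleep configured for the "error_background" code above
	for try := 1; try <= 4; try++ {
		fmt.Printf("try %d: sleep %v\n", try, base<<(try-1)) // 1s, 2s, 4s, 8s
	}
}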
@@ -264,7 +266,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, rootID string, path string
"pid": rootID,
}, &resp, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to check path exists")
return nil, fmt.Errorf("failed to check path exists: %w", err)
}
if resp.Exists != "y" {
return nil, fs.ErrorObjectNotFound
@@ -305,7 +307,7 @@ func (f *Fs) getApplianceInfo(ctx context.Context) error {
"token": "*",
}, &applianceInfo, nil)
if err != nil {
return errors.Wrap(err, "failed to read appliance version")
return fmt.Errorf("failed to read appliance version: %w", err)
}
f.opt.Version = applianceInfo.SoftwareVersionLabel
f.m.Set("version", f.opt.Version)
@@ -346,7 +348,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
"authtoken": f.opt.PermanentToken,
}, &info, nil)
if err != nil {
return "", errors.Wrap(err, "failed to get session token")
return "", fmt.Errorf("failed to get session token: %w", err)
}
refreshed = true
now = now.Add(tokenLifeTime)
@@ -370,7 +372,7 @@ type params map[string]interface{}
// rpc calls the rpc.php method of the SME file fabric
//
// This is an entry point to all the method calls
// This is an entry point to all the method calls.
//
// If result is nil then resp.Body will need closing
func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKError, options []fs.OpenOption) (resp *http.Response, err error) {
@@ -400,11 +402,13 @@ func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKEr
ContentType: "application/x-www-form-urlencoded",
Options: options,
}
try := 0
err = f.pacer.Call(func() (bool, error) {
try++
// Refresh the body each retry
opts.Body = strings.NewReader(data.Encode())
resp, err = f.srv.CallJSON(ctx, &opts, nil, result)
return f.shouldRetry(ctx, resp, err, result)
return f.shouldRetry(ctx, resp, err, result, try)
})
if err != nil {
return resp, err
@@ -485,7 +489,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Root is a dir - cache its ID
f.dirCache.Put(f.root, info.ID)
}
} else {
//} else {
// Root is not found so a directory
}
}
@@ -557,7 +561,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
"fi_name": f.opt.Enc.FromStandardName(leaf),
}, &info, nil)
if err != nil {
return "", errors.Wrap(err, "failed to create directory")
return "", fmt.Errorf("failed to create directory: %w", err)
}
// fmt.Printf("...Id %q\n", *info.Id)
return info.Item.ID, nil
@@ -590,7 +594,7 @@ OUTER:
var info api.GetFolderContentsResponse
_, err = f.rpc(ctx, "getFolderContents", p, &info, nil)
if err != nil {
return false, errors.Wrap(err, "failed to list directory")
return false, fmt.Errorf("failed to list directory: %w", err)
}
for i := range info.Items {
item := &info.Items[i]
@@ -673,7 +677,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -692,7 +696,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -721,7 +725,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) (err error) {
"completedeletion": "n",
}, &info, nil)
if err != nil {
return errors.Wrap(err, "failed to delete file")
return fmt.Errorf("failed to delete file: %w", err)
}
return nil
}
@@ -758,7 +762,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}, &info, nil)
f.dirCache.FlushDir(dir)
if err != nil {
return errors.Wrap(err, "failed to remove directory")
return fmt.Errorf("failed to remove directory: %w", err)
}
return nil
}
@@ -778,9 +782,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -820,7 +824,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
_, err = f.rpc(ctx, "doCopyFile", p, &info, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to copy file")
return nil, fmt.Errorf("failed to copy file: %w", err)
}
err = dstObj.setMetaData(&info.Item)
if err != nil {
@@ -838,8 +842,8 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// Wait for the the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID string) (err error) {
// Wait for the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
if taskID == "" || taskID == "0" {
// No task to wait for
return nil
@@ -852,7 +856,7 @@ func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID string) (err erro
"taskid": taskID,
}, &info, nil)
if err != nil {
return errors.Wrapf(err, "failed to wait for task %s to complete", taskID)
return fmt.Errorf("failed to wait for task %s to complete: %w", taskID, err)
}
if len(info.Tasks) == 0 {
// task has finished
@@ -885,7 +889,7 @@ func (f *Fs) renameLeaf(ctx context.Context, isDir bool, id string, newLeaf stri
"fi_name": newLeaf,
}, &info, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to rename leaf")
return nil, fmt.Errorf("failed to rename leaf: %w", err)
}
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
if err != nil {
@@ -929,7 +933,7 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
"dir_id": newDirectoryID,
}, &info, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to move file to new directory")
return nil, fmt.Errorf("failed to move file to new directory: %w", err)
}
item = &info.Item
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
@@ -951,9 +955,9 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1032,7 +1036,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
var info api.EmptyResponse
_, err = f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil)
if err != nil {
return errors.Wrap(err, "failed to empty trash")
return fmt.Errorf("failed to empty trash: %w", err)
}
return nil
}
@@ -1089,7 +1093,7 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type != api.ItemTypeFile {
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
return fs.ErrorIsDir
}
o.hasMetaData = true
o.size = info.Size
@@ -1130,7 +1134,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// ModTime returns the modification time of the object
//
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1159,7 +1162,7 @@ func (o *Object) modifyFile(ctx context.Context, keyValues [][2]string) error {
"data": data.String(),
}, &info, nil)
if err != nil {
return errors.Wrap(err, "failed to update metadata")
return fmt.Errorf("failed to update metadata: %w", err)
}
return o.setMetaData(&info.Item)
}
@@ -1182,7 +1185,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, errors.New("can't download - no id")
}
if o.contentType == emptyMimeType {
return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
return io.NopCloser(bytes.NewReader([]byte{})), nil
}
fs.FixRangeOption(options, o.size)
resp, err := o.fs.rpc(ctx, "getFile", params{
@@ -1196,7 +1199,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
@@ -1242,7 +1245,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
_, err = o.fs.rpc(ctx, "doInitUpload", p, &upload, nil)
if err != nil {
return errors.Wrap(err, "failed to initialize upload")
return fmt.Errorf("failed to initialize upload: %w", err)
}
// Cancel the upload if aborted or it fails
@@ -1278,18 +1281,20 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var contentLength = size
opts.ContentLength = &contentLength // NB CallJSON scribbles on this which is naughty
}
try := 0
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
try++
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader)
return o.fs.shouldRetry(ctx, resp, err, nil)
return o.fs.shouldRetry(ctx, resp, err, nil, try)
})
if err != nil {
return errors.Wrap(err, "failed to upload")
return fmt.Errorf("failed to upload: %w", err)
}
if uploader.Success != "y" {
return errors.Errorf("upload failed")
return fmt.Errorf("upload failed")
}
if size > 0 && uploader.FileSize != size {
return errors.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
return fmt.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
}
// Now finalize the file
@@ -1301,7 +1306,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
_, err = o.fs.rpc(ctx, "doCompleteUpload", p, &finalize, nil)
if err != nil {
return errors.Wrap(err, "failed to finalize upload")
return fmt.Errorf("failed to finalize upload: %w", err)
}
finalized = true


@@ -4,6 +4,8 @@ package ftp
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/textproto"
@@ -14,7 +16,6 @@ import (
"time"
"github.com/jlaffaye/ftp"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -44,66 +45,94 @@ const (
func init() {
fs.Register(&fs.RegInfo{
Name: "ftp",
Description: "FTP Connection",
Description: "FTP",
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "FTP host to connect to",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + currentUser,
Name: "user",
Help: "FTP username.",
Default: currentUser,
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21)",
Name: "port",
Help: "FTP port number.",
Default: 21,
}, {
Name: "pass",
Help: "FTP password",
Help: "FTP password.",
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: `Use Implicit FTPS (FTP over TLS)
Help: `Use Implicit FTPS (FTP over TLS).
When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTP.`,
than port 21. Cannot be used in combination with explicit FTPS.`,
Default: false,
}, {
Name: "explicit_tls",
Help: `Use Explicit FTPS (FTP over TLS)
Help: `Use Explicit FTPS (FTP over TLS).
When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
to an encrypted one. Cannot be used in combination with implicit FTPS.`,
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Name: "concurrency",
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
Note that setting this is very likely to cause deadlocks so it should
be used with care.
If you are doing a sync or copy then make sure concurrency is one more
than the sum of |--transfers| and |--checkers|.
If you use |--check-first| then it just needs to be one more than the
maximum of |--checkers| and |--transfers|.
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.
`, "|", "`", -1),
Default: 0,
Advanced: true,
}, {
Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server",
Help: "Do not verify the TLS certificate of the server.",
Default: false,
Advanced: true,
}, {
Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support",
Help: "Disable using EPSV even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "disable_mlsd",
Help: "Disable using MLSD even if server advertises support",
Help: "Disable using MLSD even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "disable_utf8",
Help: "Disable using UTF-8 even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "writing_mdtm",
Help: "Use MDTM to set modification time (VsFtpd quirk)",
Default: false,
Advanced: true,
}, {
Name: "force_list_hidden",
Help: "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections
Help: `Max time before closing idle connections.
If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.
@@ -116,17 +145,51 @@ Set to 0 to keep connections indefinitely.
Help: "Maximum time to wait for a response to close.",
Default: fs.Duration(60 * time.Second),
Advanced: true,
}, {
Name: "tls_cache_size",
Help: `Size of TLS session cache for all control and data connections.
The TLS cache allows TLS sessions to be resumed and PSK to be reused between connections.
Increase it if the default size is not enough and you see TLS resumption errors.
Enabled by default. Use 0 to disable.`,
Default: 32,
Advanced: true,
}, {
Name: "disable_tls13",
Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
Default: false,
Advanced: true,
}, {
Name: "shut_timeout",
Help: "Maximum time to wait for data connection closing status.",
Default: fs.Duration(60 * time.Second),
Advanced: true,
}, {
Name: "ask_password",
Default: false,
Help: `Allow asking for FTP password when needed.
If this is set and no password is supplied then rclone will ask for a password
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// The FTP protocol can't handle trailing spaces (for instance
// pureftpd turns them into _)
//
// proftpd can't handle '*' in file names
// pureftpd can't handle '[', ']' or '*'
// The FTP protocol can't handle trailing spaces
// (for instance, pureftpd turns them into '_')
Default: (encoder.Display |
encoder.EncodeRightSpace),
Examples: []fs.OptionExample{{
Value: "Asterisk,Ctl,Dot,Slash",
Help: "ProFTPd can't handle '*' in file names",
}, {
Value: "BackSlash,Ctl,Del,Dot,RightSpace,Slash,SquareBracket",
Help: "PureFTPd can't handle '[]' or '*' in file names",
}, {
Value: "Ctl,LeftPeriod,Slash",
Help: "VsFTPd can't handle file names starting with dot",
}},
}},
})
}
@@ -139,12 +202,19 @@ type Options struct {
Port string `config:"port"`
TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"`
TLSCacheSize int `config:"tls_cache_size"`
DisableTLS13 bool `config:"disable_tls13"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -165,6 +235,9 @@ type Fs struct {
tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime
fLstTime bool // true if the List call returns precise time
}
// Object describes an FTP file
@@ -179,6 +252,7 @@ type FileInfo struct {
Name string
Size uint64
ModTime time.Time
precise bool // true if the time is precise
IsDir bool
}
@@ -241,18 +315,26 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}
// returns true if this FTP error should be retried
func isRetriableFtpError(err error) bool {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
return true
}
}
return false
}
// shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
if isRetriableFtpError(err) {
return true, err
}
return fserrors.ShouldRetry(err), err
}
@@ -262,14 +344,44 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")
// Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) {
fs.Debugf(f, "dial(%q,%q)", network, address)
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if f.tlsConf != nil && err == nil {
conn = tls.Client(conn, f.tlsConf)
if err != nil {
return nil, err
}
return
// Connect using cleartext only for non TLS
if f.tlsConf == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
if f.opt.ExplicitTLS && initialConnection {
initialConnection = false
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, f.tlsConf)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
// See: https://github.com/jlaffaye/ftp/issues/282
if false {
err = tlsConn.HandshakeContext(ctx)
if err != nil {
_ = conn.Close()
return nil, err
}
}
return tlsConn, nil
}
ftpConfig := []ftp.DialOption{
ftp.DialWithContext(ctx),
ftp.DialWithDialFunc(dial),
}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
@@ -277,12 +389,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -290,6 +396,18 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.DisableMLSD {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
}
if f.opt.DisableUTF8 {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledUTF8(true))
}
if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
}
if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
}
if f.opt.ForceListHidden {
ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
@@ -306,7 +424,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return false, nil
})
if err != nil {
err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
err = fmt.Errorf("failed to make FTP connection to %q: %w", f.dialAddr, err)
}
return c, err
}
@@ -353,8 +471,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
_, isRegularError := errors.Cause(err).(*textproto.Error)
if !isRegularError {
var tpErr *textproto.Error
if !errors.As(err, &tpErr) {
nopErr := c.NoOp()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -400,9 +518,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if err != nil {
return nil, err
}
pass, err := obscure.Reveal(opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "NewFS decrypt password")
pass := ""
if opt.AskPassword && opt.Pass == "" {
pass = config.GetPassword("FTP server password")
} else {
pass, err = obscure.Reveal(opt.Pass)
if err != nil {
return nil, fmt.Errorf("NewFS decrypt password: %w", err)
}
}
user := opt.User
if user == "" {
@@ -419,7 +542,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
protocol = "ftps://"
}
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
@@ -427,6 +550,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
if opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
}
if opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
@@ -453,7 +582,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
// Make a connection and pool it to return errors early
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "NewFs")
return nil, fmt.Errorf("NewFs: %w", err)
}
f.fGetTime = c.IsGetTimeSupported()
f.fSetTime = c.IsSetTimeSupported()
f.fLstTime = c.IsTimePreciseInList()
if !f.fLstTime && f.fGetTime {
f.features.SlowModTime = true
}
f.putFtpConnection(&c, nil)
if root != "" {
@@ -465,7 +600,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
}
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return old f
f.root = root
return f, nil
@@ -530,8 +665,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
if remote == "" || remote == "." || remote == "/" {
// if root, assume exists and synthesize an entry
return &ftp.Entry{
Name: "",
@@ -539,13 +673,32 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
Time: time.Now(),
}, nil
}
dir := path.Dir(fullPath)
base := path.Base(fullPath)
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "findItem")
return nil, fmt.Errorf("findItem: %w", err)
}
// returns TRUE if MLST is supported which is required to call GetEntry
if c.IsTimePreciseInList() {
entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
f.putFtpConnection(&c, err)
if err != nil {
err = translateErrorFile(err)
if err == fs.ErrorObjectNotFound {
return nil, nil
}
return nil, err
}
if entry != nil {
f.entryToStandard(entry)
}
return entry, nil
}
dir := path.Dir(remote)
base := path.Base(remote)
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
@@ -564,7 +717,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(ctx, remote)
entry, err := f.findItem(ctx, path.Join(f.root, remote))
if err != nil {
return nil, err
}
@@ -573,13 +726,12 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
fs: f,
remote: remote,
}
info := &FileInfo{
o.info = &FileInfo{
Name: remote,
Size: entry.Size,
ModTime: entry.Time,
precise: f.fLstTime,
}
o.info = info
return o, nil
}
return nil, fs.ErrorObjectNotFound
@@ -587,9 +739,9 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
// dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, remote)
entry, err := f.findItem(ctx, path.Join(f.root, remote))
if err != nil {
return false, errors.Wrap(err, "dirExists")
return false, fmt.Errorf("dirExists: %w", err)
}
if entry != nil && entry.Type == ftp.EntryTypeFolder {
return true, nil
@@ -610,7 +762,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "list")
return nil, fmt.Errorf("list: %w", err)
}
var listErr error
@@ -639,7 +791,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f, "Timeout when waiting for List")
return nil, errors.New("Timeout when waiting for List")
return nil, errors.New("timeout when waiting for List")
}
// Annoyingly FTP returns success for a directory which
@@ -648,7 +800,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if len(files) == 0 {
exists, err := f.dirExists(ctx, dir)
if err != nil {
return nil, errors.Wrap(err, "list")
return nil, fmt.Errorf("list: %w", err)
}
if !exists {
return nil, fs.ErrorDirNotFound
@@ -674,6 +826,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
Name: newremote,
Size: object.Size,
ModTime: object.Time,
precise: f.fLstTime,
}
o.info = info
entries = append(entries, o)
@@ -687,8 +840,19 @@ func (f *Fs) Hashes() hash.Set {
return 0
}
// Precision shows Modified Time not supported
// Precision shows whether modified time is supported or not depending on the
// FTP server capabilities, namely whether FTP server:
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
//
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) Precision() time.Duration {
if (f.fGetTime || f.fLstTime) && f.fSetTime {
return time.Second
}
return fs.ModTimeNotSupported
}
@@ -701,7 +865,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(ctx, src.Remote())
if err != nil {
return nil, errors.Wrap(err, "Put mkParentDir failed")
return nil, fmt.Errorf("Put mkParentDir failed: %w", err)
}
o := &Object{
fs: f,
@@ -719,31 +883,18 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// getInfo reads the FileInfo for a path
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
dir := path.Dir(remote)
base := path.Base(remote)
c, err := f.getFtpConnection(ctx)
file, err := f.findItem(ctx, remote)
if err != nil {
return nil, errors.Wrap(err, "getInfo")
}
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for i := range files {
file := files[i]
f.entryToStandard(file)
if file.Name == base {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
return nil, err
} else if file != nil {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
}
return nil, fs.ErrorObjectNotFound
}
@@ -761,7 +912,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound {
return errors.Wrapf(err, "mkdir %q failed", abspath)
return fmt.Errorf("mkdir %q failed: %w", abspath, err)
}
parent := path.Dir(abspath)
err = f.mkdir(ctx, parent)
@@ -770,7 +921,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
c, connErr := f.getFtpConnection(ctx)
if connErr != nil {
return errors.Wrap(connErr, "mkdir")
return fmt.Errorf("mkdir: %w", connErr)
}
err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
@@ -806,7 +957,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
c, err := f.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
return fmt.Errorf("Rmdir: %w", translateErrorFile(err))
}
err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err)
@@ -822,11 +973,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err := f.mkParentDir(ctx, remote)
if err != nil {
return nil, errors.Wrap(err, "Move mkParentDir failed")
return nil, fmt.Errorf("Move mkParentDir failed: %w", err)
}
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "Move")
return nil, fmt.Errorf("Move: %w", err)
}
err = c.Rename(
f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
@@ -834,11 +985,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
)
f.putFtpConnection(&c, err)
if err != nil {
return nil, errors.Wrap(err, "Move Rename failed")
return nil, fmt.Errorf("Move Rename failed: %w", err)
}
dstObj, err := f.NewObject(ctx, remote)
if err != nil {
return nil, errors.Wrap(err, "Move NewObject failed")
return nil, fmt.Errorf("Move NewObject failed: %w", err)
}
return dstObj, nil
}
@@ -868,19 +1019,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound {
return errors.Wrapf(err, "DirMove getInfo failed")
return fmt.Errorf("DirMove getInfo failed: %w", err)
}
// Make sure the parent directory exists
err = f.mkdir(ctx, path.Dir(dstPath))
if err != nil {
return errors.Wrap(err, "DirMove mkParentDir dst failed")
return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
}
// Do the move
c, err := f.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "DirMove")
return fmt.Errorf("DirMove: %w", err)
}
err = c.Rename(
f.dirFromStandardPath(srcPath),
@@ -888,7 +1039,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
)
f.putFtpConnection(&c, err)
if err != nil {
return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
return fmt.Errorf("DirMove Rename(%q,%q) failed: %w", srcPath, dstPath, err)
}
return nil
}
@@ -925,12 +1076,41 @@ func (o *Object) Size() int64 {
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
if !o.info.precise && o.fs.fGetTime {
c, err := o.fs.getFtpConnection(ctx)
if err == nil {
path := path.Join(o.fs.root, o.remote)
path = o.fs.opt.Enc.FromStandardPath(path)
modTime, err := c.GetTime(path)
if err == nil && o.info != nil {
o.info.ModTime = modTime
o.info.precise = true
}
o.fs.putFtpConnection(&c, err)
}
}
return o.info.ModTime
}
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return nil
if !o.fs.fSetTime {
fs.Errorf(o.fs, "SetModTime is not supported")
return nil
}
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return err
}
path := path.Join(o.fs.root, o.remote)
path = o.fs.opt.Enc.FromStandardPath(path)
err = c.SetTime(path, modTime.In(time.UTC))
if err == nil && o.info != nil {
o.info.ModTime = modTime
o.info.precise = true
}
o.fs.putFtpConnection(&c, err)
return err
}
// Storable returns a boolean as to whether this object is storable
@@ -963,7 +1143,11 @@ func (f *ftpReadCloser) Close() error {
errchan <- f.rc.Close()
}()
// Wait for Close for up to 60 seconds by default
timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout))
closeTimeout := f.f.opt.CloseTimeout
if closeTimeout == 0 {
closeTimeout = fs.DurationOff
}
timer := time.NewTimer(time.Duration(closeTimeout))
select {
case err = <-errchan:
timer.Stop()
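
This hunk makes the data-connection Close bounded: rc.Close() runs in a goroutine while the caller waits on a channel guarded by a timer set from the close timeout (60 seconds by default; a zero value now means wait indefinitely via fs.DurationOff), so a wedged server cannot hang the transfer forever. A standalone sketch of that wait-with-timeout pattern; closeWithTimeout and the literal values are illustrative, not rclone's API:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"time"
)

// closeWithTimeout closes rc in a goroutine and waits at most d for it to
// finish, returning a timeout error if the Close call never comes back.
func closeWithTimeout(rc io.Closer, d time.Duration) error {
	errc := make(chan error, 1)
	go func() { errc <- rc.Close() }()
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case err := <-errc:
		return err
	case <-timer.C:
		return errors.New("timeout waiting for Close")
	}
}

func main() {
	rc := io.NopCloser(strings.NewReader("data"))
	fmt.Println(closeWithTimeout(rc, 60*time.Second)) // <nil>
}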
@@ -1010,22 +1194,33 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
}
c, err := o.fs.getFtpConnection(ctx)
var (
fd *ftp.Response
c *ftp.ServerConn
)
err = o.fs.pacer.Call(func() (bool, error) {
c, err = o.fs.getFtpConnection(ctx)
if err != nil {
return false, err // getFtpConnection has retries already
}
fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
}
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "open")
}
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open")
return nil, fmt.Errorf("open: %w", err)
}
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil
}
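
Open now runs getFtpConnection and RetrFrom as a single retryable unit inside the pacer: if RetrFrom fails, the connection is handed back together with the error so the pool can discard or reuse it, and the next attempt starts again from getFtpConnection. A generic sketch of that retry-the-whole-pair shape without rclone's pacer; withRetries and its parameters are illustrative:

package main

import (
	"errors"
	"fmt"
	"time"
)

// withRetries runs op up to tries times with a short pause between
// attempts, mirroring the connect-then-read pair being retried above.
func withRetries(tries int, backoff time.Duration, op func() error) error {
	var err error
	for i := 0; i < tries; i++ {
		if err = op(); err == nil {
			return nil
		}
		time.Sleep(backoff)
	}
	return fmt.Errorf("open: %w", err)
}

func main() {
	attempt := 0
	err := withRetries(3, 10*time.Millisecond, func() error {
		attempt++
		if attempt < 3 {
			return errors.New("connection dropped")
		}
		return nil
	})
	fmt.Println(attempt, err) // 3 <nil>
}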
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
@@ -1047,19 +1242,33 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Update")
return fmt.Errorf("Update: %w", err)
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - sent by some servers
if err != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
}
}
if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors
remove()
// recycle connection in advance to let remove() find free token
o.fs.putFtpConnection(nil, err)
return errors.Wrap(err, "update stor")
remove()
return fmt.Errorf("update stor: %w", err)
}
o.fs.putFtpConnection(&c, nil)
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
return fmt.Errorf("SetModTime: %w", err)
}
o.info, err = o.fs.getInfo(ctx, path)
if err != nil {
return errors.Wrap(err, "update getinfo")
return fmt.Errorf("update getinfo: %w", err)
}
return nil
}
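
The block above handles servers that answer STOR with reply 250 ("Requested file action okay"), which the client library surfaces as a *textproto.Error even though the upload succeeded; exactly that code is treated as success. The same filtering can be written with errors.As from the standard library; this is a sketch rather than the backend's code, with 250 standing in for ftp.StatusRequestedFileActionOK:

package main

import (
	"errors"
	"fmt"
	"net/textproto"
)

// ignoreStatus returns nil when err is a textproto.Error carrying the
// given FTP reply code, and err unchanged otherwise.
func ignoreStatus(err error, code int) error {
	var tpErr *textproto.Error
	if errors.As(err, &tpErr) && tpErr.Code == code {
		return nil
	}
	return err
}

func main() {
	err := &textproto.Error{Code: 250, Msg: "Requested file action okay, completed"}
	fmt.Println(ignoreStatus(err, 250)) // <nil>
	fmt.Println(ignoreStatus(err, 226)) // 250 Requested file action okay, completed
}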
@@ -1078,7 +1287,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
} else {
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Remove")
return fmt.Errorf("Remove: %w", err)
}
err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
o.fs.putFtpConnection(&c, err)

View File

@@ -0,0 +1,115 @@
package ftp
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type settings map[string]interface{}
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
configMap := configmap.Simple{}
for key, val := range opts {
configMap[key] = fmt.Sprintf("%v", val)
}
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), f.Root())
fixFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)
return fixFs
}
// test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) {
const (
fileSize = 100000000 // 100 MiB
idleTimeout = 1 * time.Second // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
)
if testing.Short() {
t.Skip("not running with -short")
}
ctx := context.Background()
ci := fs.GetConfig(ctx)
saveLowLevelRetries := ci.LowLevelRetries
saveTimeout := ci.Timeout
defer func() {
ci.LowLevelRetries = saveLowLevelRetries
ci.Timeout = saveTimeout
}()
ci.LowLevelRetries = 1
ci.Timeout = idleTimeout
upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
fixFs := deriveFs(ctx, t, f, settings{
"concurrency": concurrency,
"shut_timeout": shutTimeout,
})
// Make test object
fileTime := fstest.Time("2020-03-08T09:30:00.000000000Z")
meta := object.NewStaticObjectInfo("upload-timeout.test", fileTime, int64(fileSize), true, nil, nil)
data := readers.NewPatternReader(int64(fileSize))
// Run upload and ensure maximum time
done := make(chan bool)
deadline := time.After(maxTime)
go func() {
obj, err = fixFs.Put(ctx, data, meta)
done <- true
}()
select {
case <-done:
case <-deadline:
t.Fatalf("Upload got stuck for %v !", maxTime)
}
return obj, err
}
// non-zero shut_timeout should fix i/o errors
obj, err := upload(f.opt.Concurrency, time.Second)
assert.NoError(t, err)
assert.NotNil(t, obj)
if obj != nil {
_ = obj.Remove(ctx)
}
}
// rclone must support precise time with ProFtpd and PureFtpd out of the box.
// The VsFtpd server does not support the MFMT command to set file time like
// other servers but by default supports the MDTM command in the non-standard
// two-argument form for the same purpose.
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) testTimePrecision(t *testing.T) {
name := f.Name()
if pos := strings.Index(name, "{"); pos != -1 {
name = name[:pos]
}
switch name {
case "TestFTPProftpd", "TestFTPPureftpd", "TestFTPVsftpd":
assert.LessOrEqual(t, f.Precision(), time.Second)
}
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("UploadTimeout", f.testUploadTimeout)
t.Run("TimePrecision", f.testTimePrecision)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -9,25 +9,27 @@ import (
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
// TestIntegration runs integration tests against the rclone FTP server
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPProftpd:",
NilObject: (*ftp.Object)(nil),
})
}
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPRclone:",
NilObject: (*ftp.Object)(nil),
})
}
func TestIntegration3(t *testing.T) {
// TestIntegrationProftpd runs integration tests against proFTPd
func TestIntegrationProftpd(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPProftpd:",
NilObject: (*ftp.Object)(nil),
})
}
// TestIntegrationPureftpd runs integration tests against pureFTPd
func TestIntegrationPureftpd(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
@@ -37,12 +39,13 @@ func TestIntegration3(t *testing.T) {
})
}
// func TestIntegration4(t *testing.T) {
// if *fstest.RemoteName != "" {
// t.Skip("skipping as -remote is set")
// }
// fstests.Run(t, &fstests.Opt{
// RemoteName: "TestFTPVsftpd:",
// NilObject: (*ftp.Object)(nil),
// })
// }
// TestIntegrationVsftpd runs integration tests against vsFTPd
func TestIntegrationVsftpd(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPVsftpd:",
NilObject: (*ftp.Object)(nil),
})
}

View File

@@ -16,16 +16,17 @@ import (
"context"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -43,6 +44,7 @@ import (
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
option "google.golang.org/api/option"
// NOTE: This API is deprecated
storage "google.golang.org/api/storage/v1"
@@ -51,10 +53,10 @@ import (
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 1000 // chunk size to read directory listings
timeFormat = time.RFC3339Nano
metaMtime = "mtime" // key to store mtime in metadata
metaMtimeGsutil = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata
listChunks = 1000 // chunk size to read directory listings
minSleep = 10 * time.Millisecond
)
@@ -65,7 +67,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
RedirectURL: oauthutil.RedirectURL,
}
)
@@ -76,72 +78,72 @@ func init() {
Prefix: "gcs",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
return
}
err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
envAuth, _ := m.Get("env_auth")
if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
return nil, nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: storageConfig,
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
Default: false,
}, {
Name: "object_acl",
Help: "Access Control List for new objects.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.",
Help: "Object owner gets OWNER access.\nAll Authenticated Users get READER access.",
}, {
Value: "bucketOwnerFullControl",
Help: "Object owner gets OWNER access, and project team owners get OWNER access.",
Help: "Object owner gets OWNER access.\nProject team owners get OWNER access.",
}, {
Value: "bucketOwnerRead",
Help: "Object owner gets OWNER access, and project team owners get READER access.",
Help: "Object owner gets OWNER access.\nProject team owners get READER access.",
}, {
Value: "private",
Help: "Object owner gets OWNER access [default if left blank].",
Help: "Object owner gets OWNER access.\nDefault if left blank.",
}, {
Value: "projectPrivate",
Help: "Object owner gets OWNER access, and project team members get access according to their roles.",
Help: "Object owner gets OWNER access.\nProject team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Object owner gets OWNER access, and all Users get READER access.",
Help: "Object owner gets OWNER access.\nAll Users get READER access.",
}},
}, {
Name: "bucket_acl",
Help: "Access Control List for new buckets.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.",
Help: "Project team owners get OWNER access.\nAll Authenticated Users get READER access.",
}, {
Value: "private",
Help: "Project team owners get OWNER access [default if left blank].",
Help: "Project team owners get OWNER access.\nDefault if left blank.",
}, {
Value: "projectPrivate",
Help: "Project team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Project team owners get OWNER access, and all Users get READER access.",
Help: "Project team owners get OWNER access.\nAll Users get READER access.",
}, {
Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
Help: "Project team owners get OWNER access.\nAll Users get WRITER access.",
}},
}, {
Name: "bucket_policy_only",
@@ -164,64 +166,112 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Help: "Location for the newly created buckets.",
Examples: []fs.OptionExample{{
Value: "",
Help: "Empty for default location (US).",
Help: "Empty for default location (US)",
}, {
Value: "asia",
Help: "Multi-regional location for Asia.",
Help: "Multi-regional location for Asia",
}, {
Value: "eu",
Help: "Multi-regional location for Europe.",
Help: "Multi-regional location for Europe",
}, {
Value: "us",
Help: "Multi-regional location for United States.",
Help: "Multi-regional location for United States",
}, {
Value: "asia-east1",
Help: "Taiwan.",
Help: "Taiwan",
}, {
Value: "asia-east2",
Help: "Hong Kong.",
Help: "Hong Kong",
}, {
Value: "asia-northeast1",
Help: "Tokyo.",
Help: "Tokyo",
}, {
Value: "asia-northeast2",
Help: "Osaka",
}, {
Value: "asia-northeast3",
Help: "Seoul",
}, {
Value: "asia-south1",
Help: "Mumbai.",
Help: "Mumbai",
}, {
Value: "asia-south2",
Help: "Delhi",
}, {
Value: "asia-southeast1",
Help: "Singapore.",
Help: "Singapore",
}, {
Value: "asia-southeast2",
Help: "Jakarta",
}, {
Value: "australia-southeast1",
Help: "Sydney.",
Help: "Sydney",
}, {
Value: "australia-southeast2",
Help: "Melbourne",
}, {
Value: "europe-north1",
Help: "Finland.",
Help: "Finland",
}, {
Value: "europe-west1",
Help: "Belgium.",
Help: "Belgium",
}, {
Value: "europe-west2",
Help: "London.",
Help: "London",
}, {
Value: "europe-west3",
Help: "Frankfurt.",
Help: "Frankfurt",
}, {
Value: "europe-west4",
Help: "Netherlands.",
Help: "Netherlands",
}, {
Value: "europe-west6",
Help: "Zürich",
}, {
Value: "europe-central2",
Help: "Warsaw",
}, {
Value: "us-central1",
Help: "Iowa.",
Help: "Iowa",
}, {
Value: "us-east1",
Help: "South Carolina.",
Help: "South Carolina",
}, {
Value: "us-east4",
Help: "Northern Virginia.",
Help: "Northern Virginia",
}, {
Value: "us-west1",
Help: "Oregon.",
Help: "Oregon",
}, {
Value: "us-west2",
Help: "California.",
Help: "California",
}, {
Value: "us-west3",
Help: "Salt Lake City",
}, {
Value: "us-west4",
Help: "Las Vegas",
}, {
Value: "northamerica-northeast1",
Help: "Montréal",
}, {
Value: "northamerica-northeast2",
Help: "Toronto",
}, {
Value: "southamerica-east1",
Help: "São Paulo",
}, {
Value: "southamerica-west1",
Help: "Santiago",
}, {
Value: "asia1",
Help: "Dual region: asia-northeast1 and asia-northeast2.",
}, {
Value: "eur4",
Help: "Dual region: europe-north1 and europe-west4.",
}, {
Value: "nam4",
Help: "Dual region: us-central1 and us-east1.",
}},
}, {
Name: "storage_class",
@@ -248,6 +298,32 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}, {
Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it.
This can be useful when trying to minimise the number of transactions
rclone does if you know the bucket exists already.
`,
Default: false,
Advanced: true,
}, {
Name: "decompress",
Help: `If set this will decompress gzip encoded objects.
It is possible to upload objects to GCS with "Content-Encoding: gzip"
set. Normally rclone will download these files as compressed objects.
If this flag is set then rclone will decompress these files with
"Content-Encoding: gzip" as they are received. This means that rclone
can't check the size and hash but the file contents will be decompressed.
`,
Advanced: true,
Default: false,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -255,6 +331,17 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}, {
Name: "env_auth",
Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...),
})
}
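
The new env_auth option routes authentication through Google's Application Default Credentials instead of rclone's interactive OAuth flow or a service account file; as the NewFs change further down shows, that path reduces to one google.DefaultClient call. A minimal standalone sketch of the same lookup, using the scope constant from the (deprecated) storage/v1 package this backend still imports:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	// Resolves GOOGLE_APPLICATION_CREDENTIALS, gcloud user credentials,
	// or the instance metadata server, in that general order.
	client, err := google.DefaultClient(context.Background(), storage.DevstorageFullControlScope)
	if err != nil {
		log.Fatalf("failed to configure Google Cloud Storage: %v", err)
	}
	fmt.Printf("got authorized client: %T\n", client)
}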
@@ -270,21 +357,26 @@ type Options struct {
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
NoCheckBucket bool `config:"no_check_bucket"`
Decompress bool `config:"decompress"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
}
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
warnCompressed sync.Once // warn once about compressed files
}
// Object describes a storage object
@@ -298,6 +390,7 @@ type Object struct {
bytes int64 // Bytes in the object
modTime time.Time // Modified time of the object
mimeType string
gzipped bool // set if object has Content-Encoding: gzip
}
// ------------------------------------------------------------
@@ -315,7 +408,7 @@ func (f *Fs) Root() string {
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return fmt.Sprintf("GCS root")
return "GCS root"
}
if f.rootDirectory == "" {
return fmt.Sprintf("GCS bucket %s", f.rootBucket)
@@ -376,7 +469,7 @@ func (o *Object) split() (bucket, bucketPath string) {
func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
return nil, fmt.Errorf("error processing credentials: %w", err)
}
ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
@@ -407,9 +500,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
opt.ServiceAccountCredentials = string(loadedCreds)
}
@@ -418,7 +511,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} else if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
}
} else if opt.EnvAuth {
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
@@ -426,7 +524,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ctx := context.Background()
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
}
}
@@ -435,7 +533,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
}
f.setRoot(root)
@@ -448,9 +546,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Create a new authorized Drive client.
f.client = oAuthClient
f.svc, err = storage.New(f.client)
gcsOpts := []option.ClientOption{option.WithHTTPClient(f.client)}
if opt.Endpoint != "" {
gcsOpts = append(gcsOpts, option.WithEndpoint(opt.Endpoint))
}
f.svc, err = storage.NewService(context.Background(), gcsOpts...)
if err != nil {
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
}
if f.rootBucket != "" && f.rootDirectory != "" {
@@ -505,7 +607,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
// Set recurse to read sub directories.
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
@@ -723,7 +825,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -760,10 +862,10 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
return fmt.Errorf("failed to get bucket: %w", err)
}
} else {
return errors.Wrap(err, "failed to get bucket")
return fmt.Errorf("failed to get bucket: %w", err)
}
if f.opt.ProjectNumber == "" {
@@ -793,6 +895,14 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
}, nil)
}
// checkBucket creates the bucket if it doesn't exist unless NoCheckBucket is true
func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
if f.opt.NoCheckBucket {
return nil
}
return f.makeBucket(ctx, bucket)
}
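
checkBucket is now the gate in front of makeBucket: Copy and Update below call it instead of makeBucket directly, so setting no_check_bucket skips both the existence check and the implicit bucket creation, saving a transaction per operation when the bucket is known to exist. Assuming the usual --gcs-<option> flag naming (the same convention as --gcs-decompress mentioned further down) and an illustrative remote called gcs:, that looks like:

rclone copy /local/dir gcs:existing-bucket/path --gcs-no-check-bucket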
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
@@ -817,16 +927,16 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
err := f.checkBucket(ctx, dstBucket)
if err != nil {
return nil, err
}
@@ -910,6 +1020,7 @@ func (o *Object) setMetaData(info *storage.Object) {
o.url = info.MediaLink
o.bytes = int64(info.Size)
o.mimeType = info.ContentType
o.gzipped = info.ContentEncoding == "gzip"
// Read md5sum
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
@@ -922,7 +1033,7 @@ func (o *Object) setMetaData(info *storage.Object) {
// read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime]
if ok {
modTime, err := time.Parse(timeFormatIn, mtimeString)
modTime, err := time.Parse(timeFormat, mtimeString)
if err == nil {
o.modTime = modTime
return
@@ -930,13 +1041,30 @@ func (o *Object) setMetaData(info *storage.Object) {
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
}
// Fallback to GSUtil mtime
mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
if ok {
unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
if err == nil {
o.modTime = time.Unix(unixTimeSec, 0)
return
}
fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
}
// Fallback to the Updated time
modTime, err := time.Parse(timeFormatIn, info.Updated)
modTime, err := time.Parse(timeFormat, info.Updated)
if err != nil {
fs.Logf(o, "Bad time decode: %v", err)
} else {
o.modTime = modTime
}
// If gunzipping then size and md5sum are unknown
if o.gzipped && o.fs.opt.Decompress {
o.bytes = -1
o.md5sum = ""
}
}
// readObjectInfo reads the definition for an object
@@ -988,7 +1116,8 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1)
metadata[metaMtime] = modTime.Format(timeFormatOut)
metadata[metaMtime] = modTime.Format(timeFormat)
metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
return metadata
}
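
Modification time handling now interoperates with gsutil: setMetaData prefers rclone's own mtime key (RFC3339Nano), falls back to goog-reserved-file-mtime (whole Unix seconds), and finally to the object's Updated stamp, while metadataFromModTime writes both keys so either tool can recover the time. A small stdlib-only sketch of the two encodings and the read-back preference; the timestamp is illustrative:

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	modTime := time.Date(2020, 3, 8, 9, 30, 0, 123456789, time.UTC)

	// rclone's own key: full precision RFC3339Nano
	mtime := modTime.Format(time.RFC3339Nano)
	// gsutil's key: whole seconds since the epoch
	gsutilMtime := strconv.FormatInt(modTime.Unix(), 10)
	fmt.Println(mtime, gsutilMtime)

	// Reading back: prefer the precise value, fall back to the gsutil one
	if t, err := time.Parse(time.RFC3339Nano, mtime); err == nil {
		fmt.Println("precise:", t)
	} else if sec, err := strconv.ParseInt(gsutilMtime, 10, 64); err == nil {
		fmt.Println("gsutil :", time.Unix(sec, 0).UTC())
	}
}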
@@ -1000,11 +1129,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
return err
}
// Add the mtime to the existing metadata
mtime := modTime.Format(timeFormatOut)
if object.Metadata == nil {
object.Metadata = make(map[string]string, 1)
}
object.Metadata[metaMtime] = mtime
object.Metadata[metaMtime] = modTime.Format(timeFormat)
object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
// Copy the object to itself to update the metadata
// Using PATCH requires too many permissions
bucket, bucketPath := o.split()
@@ -1036,6 +1165,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}
fs.FixRangeOption(options, o.bytes)
if o.gzipped && !o.fs.opt.Decompress {
// Allow files which are stored on the cloud storage system
// compressed to be downloaded without being decompressed. Note
// that setting this here overrides the automatic decompression
// in the Transport.
//
// See: https://cloud.google.com/storage/docs/transcoding
req.Header.Set("Accept-Encoding", "gzip")
o.fs.warnCompressed.Do(func() {
fs.Logf(o, "Not decompressing 'Content-Encoding: gzip' compressed file. Use --gcs-decompress to override")
})
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1054,7 +1195,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
_, isRanging := req.Header["Range"]
if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
_ = res.Body.Close() // ignore error
return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
return nil, fmt.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
}
return res.Body, nil
}
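
For objects stored with Content-Encoding: gzip, Open now has two behaviours: with decompress unset it asks for Accept-Encoding: gzip explicitly, which stops Go's transport from transparently gunzipping, so the stored compressed bytes arrive unchanged (with a one-time warning); with the decompress option set, the object is decompressed on receipt and setMetaData reports size -1 and an empty MD5. A tiny sketch of the header trick, which is plain net/http behaviour; the URL is illustrative:

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/object", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Requesting gzip ourselves means the transport will NOT transparently
	// decompress the response body, so we see the stored bytes as-is.
	req.Header.Set("Accept-Encoding", "gzip")
	fmt.Println(req.Header.Get("Accept-Encoding")) // gzip
}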
@@ -1064,7 +1205,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
err := o.fs.makeBucket(ctx, bucket)
err := o.fs.checkBucket(ctx, bucket)
if err != nil {
return err
}

View File

@@ -1,3 +1,4 @@
// Package api provides types used by the Google Photos API.
package api
import (

View File

@@ -6,9 +6,9 @@ package googlephotos
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
golog "log"
"net/http"
"net/url"
"path"
@@ -18,9 +18,9 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -29,6 +29,7 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
@@ -54,6 +55,7 @@ const (
minSleep = 10 * time.Millisecond
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
scopeAccess = 2 // position of access scope in list
)
var (
@@ -62,12 +64,12 @@ var (
Scopes: []string{
"openid",
"profile",
scopeReadWrite,
scopeReadWrite, // this must be at position scopeAccess
},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
RedirectURL: oauthutil.RedirectURL,
}
)
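
Because the scope slice now starts with "openid" and "profile", the read-only/read-write photoslibrary scope sits at a fixed position recorded as scopeAccess = 2, and the Config function below swaps only that element rather than Scopes[0] as before. A toy illustration of the index swap; the slice literal mirrors the var block above:

package main

import "fmt"

const scopeAccess = 2 // position of the photoslibrary scope in the list

func main() {
	scopes := []string{
		"openid",
		"profile",
		"https://www.googleapis.com/auth/photoslibrary", // read-write by default
	}
	readOnly := true
	if readOnly {
		scopes[scopeAccess] = "https://www.googleapis.com/auth/photoslibrary.readonly"
	}
	fmt.Println(scopes)
}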
@@ -78,36 +80,36 @@ func init() {
Prefix: "gphotos",
Description: "Google Photos",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
return
return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
}
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes[0] = scopeReadOnly
} else {
oauthConfig.Scopes[0] = scopeReadWrite
switch config.State {
case "":
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes[scopeAccess] = scopeReadOnly
} else {
oauthConfig.Scopes[scopeAccess] = scopeReadWrite
}
return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
case "warning":
// Warn the user as required by google photos integration
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)
case "warning_done":
return nil, nil
}
// Do the oauth
err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
if err != nil {
golog.Fatalf("Failed to configure token: %v", err)
}
// Warn the user
fmt.Print(`
*** IMPORTANT: All media items uploaded to Google Photos with rclone
*** are stored in full resolution at original quality. These uploads
*** will count towards storage in your Google Account.
`)
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "read_only",
@@ -130,14 +132,14 @@ you want to read the media.`,
}, {
Name: "start_year",
Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year.`,
Advanced: true,
}, {
Name: "include_archived",
Default: false,
Help: `Also view and download archived media.
By default rclone does not request archived media. Thus, when syncing,
By default, rclone does not request archived media. Thus, when syncing,
archived media is not visible in directory listings or transferred.
Note that media in albums is always visible and synced, no matter
@@ -149,16 +151,24 @@ listings and transferred.
Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}}...),
})
}
// Options defines the configuration for this backend
type Options struct {
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"`
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote storage server
@@ -168,7 +178,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
unAuth *rest.Client // unauthenticated http client
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
ts *oauthutil.TokenSource // token source for oauth2
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps
@@ -282,7 +292,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
baseClient := fshttp.NewClient(ctx)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
return nil, fmt.Errorf("failed to configure Box: %w", err)
}
root = strings.Trim(path.Clean(root), "/")
@@ -335,13 +345,13 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't read openID config")
return "", fmt.Errorf("couldn't read openID config: %w", err)
}
// Find userinfo endpoint
endpoint, ok := openIDconfig[name].(string)
if !ok {
return "", errors.Errorf("couldn't find %q from openID config", name)
return "", fmt.Errorf("couldn't find %q from openID config", name)
}
return endpoint, nil
@@ -364,7 +374,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read user info")
return nil, fmt.Errorf("couldn't read user info: %w", err)
}
return userInfo, nil
}
@@ -395,7 +405,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't revoke token")
return fmt.Errorf("couldn't revoke token: %w", err)
}
fs.Infof(f, "res = %+v", res)
return nil
@@ -482,7 +492,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list albums")
return nil, fmt.Errorf("couldn't list albums: %w", err)
}
newAlbums := result.Albums
if shared {
@@ -496,7 +506,9 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
lastID = newAlbums[len(newAlbums)-1].ID
}
for i := range newAlbums {
all.add(&newAlbums[i])
anAlbum := newAlbums[i]
anAlbum.Title = f.opt.Enc.FromStandardPath(anAlbum.Title)
all.add(&anAlbum)
}
if result.NextPageToken == "" {
break
@@ -537,7 +549,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list files")
return fmt.Errorf("couldn't list files: %w", err)
}
items := result.MediaItems
if len(items) > 0 && items[0].ID == lastID {
@@ -550,7 +562,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
for i := range items {
item := &result.MediaItems[i]
remote := item.Filename
remote = strings.Replace(remote, "/", "", -1)
remote = strings.ReplaceAll(remote, "/", "")
err = fn(remote, item, false)
if err != nil {
return err
@@ -649,7 +661,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -681,7 +693,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create album")
return nil, fmt.Errorf("couldn't create album: %w", err)
}
f.albums[false].add(&result)
return &result, nil
@@ -867,7 +879,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't get media item")
return fmt.Errorf("couldn't get media item: %w", err)
}
o.setMetaData(&item)
return nil
@@ -1002,7 +1014,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't upload file")
return fmt.Errorf("couldn't upload file: %w", err)
}
uploadToken := strings.TrimSpace(string(token))
if uploadToken == "" {
@@ -1030,14 +1042,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create media item")
return fmt.Errorf("failed to create media item: %w", err)
}
if len(result.NewMediaItemResults) != 1 {
return errors.New("bad response to BatchCreate wrong number of items")
}
mediaItemResult := result.NewMediaItemResults[0]
if mediaItemResult.Status.Code != 0 {
return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
}
o.setMetaData(&mediaItemResult.MediaItem)
@@ -1059,7 +1071,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
albumTitle, fileName := match[1], match[2]
album, ok := o.fs.albums[false].get(albumTitle)
if !ok {
return errors.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
return fmt.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
}
opts := rest.Opts{
Method: "POST",
@@ -1075,7 +1087,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't delete item from album")
return fmt.Errorf("couldn't delete item from album: %w", err)
}
return nil
}

View File

@@ -3,7 +3,7 @@ package googlephotos
import (
"context"
"fmt"
"io/ioutil"
"io"
"net/http"
"path"
"testing"
@@ -12,7 +12,6 @@ import (
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
@@ -37,7 +36,7 @@ func TestIntegration(t *testing.T) {
}
f, err := fs.NewFs(ctx, *fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
}
require.NoError(t, err)
@@ -56,7 +55,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
@@ -99,7 +98,7 @@ func TestIntegration(t *testing.T) {
t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx)
require.NoError(t, err)
buf, err := ioutil.ReadAll(in)
buf, err := io.ReadAll(in)
require.NoError(t, err)
require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000)
@@ -221,7 +220,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()

View File

@@ -11,7 +11,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
)
@@ -270,7 +269,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
year := match[1]
current, err := time.Parse("2006", year)
if err != nil {
return nil, errors.Errorf("bad year %q", match[1])
return nil, fmt.Errorf("bad year %q", match[1])
}
currentYear := current.Year()
for current.Year() == currentYear {
@@ -284,7 +283,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
year, err := strconv.Atoi(match[1])
if err != nil || year < 1000 || year > 3000 {
return sf, errors.Errorf("bad year %q", match[1])
return sf, fmt.Errorf("bad year %q", match[1])
}
sf = api.SearchFilter{
Filters: &api.Filters{
@@ -300,14 +299,14 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
if len(match) >= 3 {
month, err := strconv.Atoi(match[2])
if err != nil || month < 1 || month > 12 {
return sf, errors.Errorf("bad month %q", match[2])
return sf, fmt.Errorf("bad month %q", match[2])
}
sf.Filters.DateFilter.Dates[0].Month = month
}
if len(match) >= 4 {
day, err := strconv.Atoi(match[3])
if err != nil || day < 1 || day > 31 {
return sf, errors.Errorf("bad day %q", match[3])
return sf, fmt.Errorf("bad day %q", match[3])
}
sf.Filters.DateFilter.Dates[0].Day = day
}
@@ -316,7 +315,7 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
// featureFilter creates a filter for the Feature enum
//
// The API only supports one feature, FAVORITES, so hardcode that feature
// The API only supports one feature, FAVORITES, so hardcode that feature.
//
// https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter
func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) {

View File

@@ -50,7 +50,7 @@ func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums,
// mock listUploads for testing
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, _ = f.uploaded[dir]
entries = f.uploaded[dir]
return entries, nil
}

180
backend/hasher/commands.go Normal file
View File

@@ -0,0 +1,180 @@
package hasher
import (
"context"
"errors"
"fmt"
"path"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/kv"
)
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "drop":
return nil, f.db.Stop(true)
case "dump", "fulldump":
return nil, f.dbDump(ctx, name == "fulldump", "")
case "import", "stickyimport":
sticky := name == "stickyimport"
if len(arg) != 2 {
return nil, errors.New("please provide checksum type and path to sum file")
}
return nil, f.dbImport(ctx, arg[0], arg[1], sticky)
default:
return nil, fs.ErrorCommandNotFound
}
}
var commandHelp = []fs.CommandHelp{{
Name: "drop",
Short: "Drop cache",
Long: `Completely drop checksum cache.
Usage Example:
rclone backend drop hasher:
`,
}, {
Name: "dump",
Short: "Dump the database",
Long: "Dump cache records covered by the current remote",
}, {
Name: "fulldump",
Short: "Full dump of the database",
Long: "Dump all cache records in the database",
}, {
Name: "import",
Short: "Import a SUM file",
Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
Usage Example:
rclone backend import hasher:subdir md5 /path/to/sum.md5
`,
}, {
Name: "stickyimport",
Short: "Perform fast import of a SUM file",
Long: `Fill hash cache from a SUM file without verifying file fingerprints.
Usage Example:
rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
`,
}}
func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
if root == "" {
remoteFs, err := cache.Get(ctx, f.opt.Remote)
if err != nil {
return err
}
root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
}
op := &kvDump{
full: full,
root: root,
path: f.db.Path(),
fs: f,
}
err := f.db.Do(false, op)
if err == kv.ErrEmpty {
fs.Infof(op.path, "empty")
err = nil
}
return err
}
func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bool) error {
var hashType hash.Type
if err := hashType.Set(hashName); err != nil {
return err
}
if hashType == hash.None {
return errors.New("please provide a valid hash type")
}
if !f.suppHashes.Contains(hashType) {
return errors.New("unsupported hash type")
}
if !f.keepHashes.Contains(hashType) {
fs.Infof(nil, "Need not import hashes of this type")
return nil
}
_, sumPath, err := fspath.SplitFs(sumRemote)
if err != nil {
return err
}
sumFs, err := cache.Get(ctx, sumRemote)
switch err {
case fs.ErrorIsFile:
// ok
case nil:
return fmt.Errorf("not a file: %s", sumRemote)
default:
return err
}
sumObj, err := sumFs.NewObject(ctx, path.Base(sumPath))
if err != nil {
return fmt.Errorf("cannot open sum file: %w", err)
}
hashes, err := operations.ParseSumFile(ctx, sumObj)
if err != nil {
return fmt.Errorf("failed to parse sum file: %w", err)
}
if sticky {
rootPath := f.Fs.Root()
for remote, hashVal := range hashes {
key := path.Join(rootPath, remote)
hashSums := operations.HashSums{hashName: hashVal}
if err := f.putRawHashes(ctx, key, anyFingerprint, hashSums); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
}
}
fs.Infof(nil, "Summary: %d checksum(s) imported", len(hashes))
return nil
}
const longImportThreshold = 100
if len(hashes) > longImportThreshold {
fs.Infof(nil, "Importing %d checksums. Please wait...", len(hashes))
}
doneCount := 0
err = operations.ListFn(ctx, f, func(obj fs.Object) {
remote := obj.Remote()
hash := hashes[remote]
hashes[remote] = "" // mark as handled
o, ok := obj.(*Object)
if ok && hash != "" {
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
}
accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
doneCount++
}
})
if err != nil {
fs.Errorf(nil, "Import failed: %v", err)
}
skipCount := 0
for remote, emptyOrDone := range hashes {
if emptyOrDone != "" {
fs.Infof(nil, "Skip vanished object: %s", remote)
skipCount++
}
}
fs.Infof(nil, "Summary: %d imported, %d skipped", doneCount, skipCount)
return err
}
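
Read together with the command help above, the two import variants differ only in verification: import walks the live objects and binds each checksum to the object's current fingerprint, skipping entries whose files have vanished, while stickyimport writes the sums straight into the cache keyed by path without touching the remote. Usage, repeating the examples from the help text:

rclone backend import hasher:subdir md5 /path/to/sum.md5
rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5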

backend/hasher/hasher.go Normal file
View File

@@ -0,0 +1,530 @@
// Package hasher implements a checksum handling overlay backend
package hasher
import (
"context"
"encoding/gob"
"errors"
"fmt"
"io"
"path"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/kv"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "hasher",
Description: "Better checksums for other remotes",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Required: true,
Help: "Remote to cache checksums for (e.g. myRemote:path).",
}, {
Name: "hashes",
Default: fs.CommaSepList{"md5", "sha1"},
Advanced: false,
Help: "Comma separated list of supported checksum types.",
}, {
Name: "max_age",
Advanced: false,
Default: fs.DurationOff,
Help: "Maximum time to keep checksums in cache (0 = no cache, off = cache forever).",
}, {
Name: "auto_size",
Advanced: true,
Default: fs.SizeSuffix(0),
Help: "Auto-update checksum for files smaller than this size (disabled by default).",
}},
})
}
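
Given the options registered above, a minimal configuration wraps an existing remote and declares which checksums to cache; everything below except the option names is illustrative (the [hashed] section name and myRemote:path are placeholders):

[hashed]
type = hasher
remote = myRemote:path
hashes = md5,sha1
max_age = off

A command such as rclone md5sum hashed: should then be able to answer from the checksum cache where entries exist, instead of re-reading files from the wrapped remote.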
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
Hashes fs.CommaSepList `config:"hashes"`
AutoSize fs.SizeSuffix `config:"auto_size"`
MaxAge fs.Duration `config:"max_age"`
}
// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
name string
root string
wrapper fs.Fs
features *fs.Features
opt *Options
db *kv.DB
// fingerprinting
fpTime bool // true if using time in fingerprints
fpHash hash.Type // hash type to use in fingerprints or None
// hash types triaged by groups
suppHashes hash.Set // all supported checksum types
passHashes hash.Set // passed directly to the base without caching
slowHashes hash.Set // passed to the base and then cached
autoHashes hash.Set // calculated in-house and cached
keepHashes hash.Set // checksums to keep in cache (slow + auto)
}
var warnExperimental sync.Once
// NewFs constructs an Fs from the remote:path string
func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs.Fs, error) {
if !kv.Supported() {
return nil, errors.New("hasher is not supported on this OS")
}
warnExperimental.Do(func() {
fs.Infof(nil, "Hasher is EXPERIMENTAL!")
})
opt := &Options{}
err := configstruct.Set(cmap, opt)
if err != nil {
return nil, err
}
if strings.HasPrefix(opt.Remote, fsname+":") {
return nil, errors.New("can't point remote at itself")
}
remotePath := fspath.JoinRootPath(opt.Remote, rpath)
baseFs, err := cache.Get(ctx, remotePath)
if err != nil && err != fs.ErrorIsFile {
return nil, fmt.Errorf("failed to derive base remote %q: %w", opt.Remote, err)
}
f := &Fs{
Fs: baseFs,
name: fsname,
root: rpath,
opt: opt,
}
baseFeatures := baseFs.Features()
f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported
if baseFeatures.SlowHash {
f.slowHashes = f.Fs.Hashes()
} else {
f.passHashes = f.Fs.Hashes()
f.fpHash = f.passHashes.GetOne()
}
f.suppHashes = f.passHashes
f.suppHashes.Add(f.slowHashes.Array()...)
for _, hashName := range opt.Hashes {
var ht hash.Type
if err := ht.Set(hashName); err != nil {
return nil, fmt.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
}
if !f.slowHashes.Contains(ht) {
f.autoHashes.Add(ht)
}
f.keepHashes.Add(ht)
f.suppHashes.Add(ht)
}
fs.Debugf(f, "Groups by usage: cached %s, passed %s, auto %s, slow %s, supported %s",
f.keepHashes, f.passHashes, f.autoHashes, f.slowHashes, f.suppHashes)
var nilSet hash.Set
if f.keepHashes == nilSet {
return nil, errors.New("configured hash_names have nothing to keep in cache")
}
if f.opt.MaxAge > 0 {
gob.Register(hashRecord{})
db, err := kv.Start(ctx, "hasher", f.Fs)
if err != nil {
return nil, err
}
f.db = db
}
stubFeatures := &fs.Features{
CanHaveEmptyDirectories: true,
IsLocal: true,
ReadMimeType: true,
WriteMimeType: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
cache.PinUntilFinalized(f.Fs, f)
return f, err
}
//
// Filesystem
//
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { return f.suppHashes }
// String returns a description of the FS
// The "hasher::" prefix is a distinctive feature.
func (f *Fs) String() string {
return fmt.Sprintf("hasher::%s:%s", f.name, f.root)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs { return f.Fs }
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs { return f.wrapper }
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper }
// Wrap base entries into hasher entries.
func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries, err error) {
hashEntries = baseEntries[:0] // work inplace
for _, entry := range baseEntries {
switch x := entry.(type) {
case fs.Object:
obj, err := f.wrapObject(x, nil)
if err != nil {
return nil, err
}
hashEntries = append(hashEntries, obj)
default:
hashEntries = append(hashEntries, entry) // trash in - trash out
}
}
return hashEntries, nil
}
// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if entries, err = f.Fs.List(ctx, dir); err != nil {
return nil, err
}
return f.wrapEntries(entries)
}
// ListR lists the objects and directories recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
return f.Fs.Features().ListR(ctx, dir, func(baseEntries fs.DirEntries) error {
hashEntries, err := f.wrapEntries(baseEntries)
if err != nil {
return err
}
return callback(hashEntries)
})
}
// Purge a directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
if do := f.Fs.Features().Purge; do != nil {
if err := do(ctx, dir); err != nil {
return err
}
err := f.db.Do(true, &kvPurge{
dir: path.Join(f.Fs.Root(), dir),
})
if err != nil {
fs.Errorf(f, "Failed to purge some hashes: %v", err)
}
return nil
}
return fs.ErrorCantPurge
}
// PutStream uploads to the remote path with undeterminate size.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if do := f.Fs.Features().PutStream; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err)
}
return nil, errors.New("PutStream not supported")
}
// PutUnchecked uploads the object, allowing duplicates.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if do := f.Fs.Features().PutUnchecked; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err)
}
return nil, errors.New("PutUnchecked not supported")
}
// pruneHash deletes hash for a path
func (f *Fs) pruneHash(remote string) error {
return f.db.Do(true, &kvPrune{
key: path.Join(f.Fs.Root(), remote),
})
}
// CleanUp the trash in the Fs
func (f *Fs) CleanUp(ctx context.Context) error {
if do := f.Fs.Features().CleanUp; do != nil {
return do(ctx)
}
return errors.New("not supported by underlying remote")
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if do := f.Fs.Features().About; do != nil {
return do(ctx)
}
return nil, errors.New("not supported by underlying remote")
}
// ChangeNotify calls the passed function with a path that has had changes.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
if do := f.Fs.Features().ChangeNotify; do != nil {
do(ctx, notifyFunc, pollIntervalChan)
}
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
if do := f.Fs.Features().UserInfo; do != nil {
return do(ctx)
}
return nil, fs.ErrorNotImplemented
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
if do := f.Fs.Features().Disconnect; do != nil {
return do(ctx)
}
return fs.ErrorNotImplemented
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if do := f.Fs.Features().MergeDirs; do != nil {
return do(ctx, dirs)
}
return errors.New("MergeDirs not supported")
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
if do := f.Fs.Features().DirCacheFlush; do != nil {
do()
}
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
if do := f.Fs.Features().PublicLink; do != nil {
return do(ctx, remote, expire, unlink)
}
return "", errors.New("PublicLink not supported")
}
// Copy src to this remote using server-side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
o, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantCopy
}
oResult, err := do(ctx, o.Object, remote)
return f.wrapObject(oResult, err)
}
// Move src to this remote using server-side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
}
o, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantMove
}
oResult, err := do(ctx, o.Object, remote)
if err != nil {
return nil, err
}
_ = f.db.Do(true, &kvMove{
src: path.Join(f.Fs.Root(), src.Remote()),
dst: path.Join(f.Fs.Root(), remote),
dir: false,
fs: f,
})
return f.wrapObject(oResult, nil)
}
// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
do := f.Fs.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
srcFs, ok := src.(*Fs)
if !ok {
return fs.ErrorCantDirMove
}
err := do(ctx, srcFs.Fs, srcRemote, dstRemote)
if err == nil {
_ = f.db.Do(true, &kvMove{
src: path.Join(srcFs.Fs.Root(), srcRemote),
dst: path.Join(f.Fs.Root(), dstRemote),
dir: true,
fs: f,
})
}
return err
}
// Shutdown the backend, closing any background tasks and any cached connections.
func (f *Fs) Shutdown(ctx context.Context) (err error) {
err = f.db.Stop(false)
if do := f.Fs.Features().Shutdown; do != nil {
if err2 := do(ctx); err2 != nil {
err = err2
}
}
return
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(ctx, remote)
return f.wrapObject(o, err)
}
//
// Object
//
// Object represents a composite file wrapping one or more data chunks
type Object struct {
fs.Object
f *Fs
}
// Wrap base object into hasher object
func (f *Fs) wrapObject(o fs.Object, err error) (obj fs.Object, outErr error) {
// log.Trace(o, "err=%v", err)("obj=%#v, outErr=%v", &obj, &outErr)
if err != nil {
return nil, err
}
if o == nil {
return nil, fs.ErrorObjectNotFound
}
return &Object{Object: o, f: f}, nil
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info { return o.f }
// UnWrap returns the wrapped Object
func (o *Object) UnWrap() fs.Object { return o.Object }
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Object.String()
}
// ID returns the ID of the Object if possible
func (o *Object) ID() string {
if doer, ok := o.Object.(fs.IDer); ok {
return doer.ID()
}
return ""
}
// GetTier returns the Tier of the Object if possible
func (o *Object) GetTier() string {
if doer, ok := o.Object.(fs.GetTierer); ok {
return doer.GetTier()
}
return ""
}
// SetTier set the Tier of the Object if possible
func (o *Object) SetTier(tier string) error {
if doer, ok := o.Object.(fs.SetTierer); ok {
return doer.SetTier(tier)
}
return errors.New("SetTier not supported")
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
if doer, ok := o.Object.(fs.MimeTyper); ok {
return doer.MimeType(ctx)
}
return ""
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.Object.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil)
)
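As a hedged illustration of how a wrapped hasher Fs is constructed in practice (a minimal sketch only; the local path and the hash list are placeholder assumptions, mirroring the on-the-fly crypt remote used in the internal test below):

package main

import (
	"context"
	"fmt"
	"log"

	_ "github.com/rclone/rclone/backend/hasher" // register the hasher backend
	_ "github.com/rclone/rclone/backend/local"  // register the wrapped base backend
	"github.com/rclone/rclone/fs"
)

func main() {
	ctx := context.Background()
	// Create an on-the-fly hasher remote wrapping a local directory.
	f, err := fs.NewFs(ctx, `:hasher,remote="/tmp/data",hashes="md5":`)
	if err != nil {
		log.Fatal(err)
	}
	entries, err := f.List(ctx, "")
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		fmt.Println(entry.Remote())
	}
}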


@@ -0,0 +1,78 @@
package hasher
import (
"context"
"fmt"
"os"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/kv"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: name, ModTime: mtime1}
o := fstests.PutTestContents(ctx, t, f, &item, data, true)
require.NotNil(t, o)
return o
}
func (f *Fs) testUploadFromCrypt(t *testing.T) {
// make a temporary local remote
tempRoot, err := fstest.LocalRemote()
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempRoot)
}()
// make a temporary crypt remote
ctx := context.Background()
pass := obscure.MustObscure("crypt")
remote := fmt.Sprintf(`:crypt,remote="%s",password="%s":`, tempRoot, pass)
cryptFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)
// make a test file on the crypt remote
const dirName = "from_crypt_1"
const fileName = dirName + "/file_from_crypt_1"
const longTime = fs.ModTimeNotSupported
src := putFile(ctx, t, cryptFs, fileName, "doggy froggy")
// ensure that hash does not exist yet
_ = f.pruneHash(fileName)
hashType := f.keepHashes.GetOne()
hash, err := f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
assert.Error(t, err)
assert.Empty(t, hash)
// upload file to hasher
in, err := src.Open(ctx)
require.NoError(t, err)
dst, err := f.Put(ctx, in, src)
require.NoError(t, err)
assert.NotNil(t, dst)
// check that hash was created
hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
assert.NoError(t, err)
assert.NotEmpty(t, hash)
//t.Logf("hash is %q", hash)
_ = operations.Purge(ctx, f, dirName)
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
if !kv.Supported() {
t.Skip("hasher is not supported on this OS")
}
t.Run("UploadFromCrypt", f.testUploadFromCrypt)
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -0,0 +1,39 @@
package hasher_test
import (
"os"
"path/filepath"
"testing"
"github.com/rclone/rclone/backend/hasher"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/kv"
_ "github.com/rclone/rclone/backend/all" // for integration tests
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if !kv.Supported() {
t.Skip("hasher is not supported on this OS")
}
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*hasher.Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
},
UnimplementableObjectMethods: []string{},
}
if *fstest.RemoteName == "" {
tempDir := filepath.Join(os.TempDir(), "rclone-hasher-test")
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: "TestHasher", Key: "type", Value: "hasher"},
{Name: "TestHasher", Key: "remote", Value: tempDir},
}
opt.RemoteName = "TestHasher:"
opt.QuickTestOK = true
}
fstests.Run(t, &opt)
}

backend/hasher/kv.go

@@ -0,0 +1,315 @@
package hasher
import (
"bytes"
"context"
"encoding/gob"
"errors"
"fmt"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/kv"
)
const (
timeFormat = "2006-01-02T15:04:05.000000000-0700"
anyFingerprint = "*"
)
type hashMap map[hash.Type]string
type hashRecord struct {
Fp string // fingerprint
Hashes operations.HashSums
Created time.Time
}
func (r *hashRecord) encode(key string) ([]byte, error) {
var buf bytes.Buffer
if err := gob.NewEncoder(&buf).Encode(r); err != nil {
fs.Debugf(key, "hasher encoding %v: %v", r, err)
return nil, err
}
return buf.Bytes(), nil
}
func (r *hashRecord) decode(key string, data []byte) error {
if err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(r); err != nil {
fs.Debugf(key, "hasher decoding %q failed: %v", data, err)
return err
}
return nil
}
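A minimal round-trip sketch for the gob helpers above (a hypothetical test, not part of the backend; the sample values are made up):

package hasher

import (
	"testing"
	"time"

	"github.com/rclone/rclone/fs/operations"
)

// TestHashRecordRoundTrip (hypothetical) checks that encode and decode
// are inverses for a simple record.
func TestHashRecordRoundTrip(t *testing.T) {
	rec := &hashRecord{
		Fp:      anyFingerprint,
		Hashes:  operations.HashSums{"md5": "d41d8cd98f00b204e9800998ecf8427e"},
		Created: time.Now(),
	}
	data, err := rec.encode("some/key")
	if err != nil {
		t.Fatal(err)
	}
	var back hashRecord
	if err := back.decode("some/key", data); err != nil {
		t.Fatal(err)
	}
	if back.Fp != rec.Fp || back.Hashes["md5"] != rec.Hashes["md5"] {
		t.Errorf("round trip mismatch: got %+v, want %+v", back, rec)
	}
}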
// kvPrune: prune a single hash
type kvPrune struct {
key string
}
func (op *kvPrune) Do(ctx context.Context, b kv.Bucket) error {
return b.Delete([]byte(op.key))
}
// kvPurge: delete a subtree
type kvPurge struct {
dir string
}
func (op *kvPurge) Do(ctx context.Context, b kv.Bucket) error {
dir := op.dir
if !strings.HasSuffix(dir, "/") {
dir += "/"
}
var items []string
cur := b.Cursor()
bkey, _ := cur.Seek([]byte(dir))
for bkey != nil {
key := string(bkey)
if !strings.HasPrefix(key, dir) {
break
}
items = append(items, key[len(dir):])
bkey, _ = cur.Next()
}
nerr := 0
for _, sub := range items {
if err := b.Delete([]byte(dir + sub)); err != nil {
nerr++
}
}
fs.Debugf(dir, "%d hashes purged, %d failed", len(items)-nerr, nerr)
return nil
}
// kvMove: assign hashes to new path
type kvMove struct {
src string
dst string
dir bool
fs *Fs
}
func (op *kvMove) Do(ctx context.Context, b kv.Bucket) error {
src, dst := op.src, op.dst
if !op.dir {
err := moveHash(b, src, dst)
fs.Debugf(op.fs, "moving cached hash %s to %s (err: %v)", src, dst, err)
return err
}
if !strings.HasSuffix(src, "/") {
src += "/"
}
if !strings.HasSuffix(dst, "/") {
dst += "/"
}
var items []string
cur := b.Cursor()
bkey, _ := cur.Seek([]byte(src))
for bkey != nil {
key := string(bkey)
if !strings.HasPrefix(key, src) {
break
}
items = append(items, key[len(src):])
bkey, _ = cur.Next()
}
nerr := 0
for _, suffix := range items {
srcKey, dstKey := src+suffix, dst+suffix
err := moveHash(b, srcKey, dstKey)
fs.Debugf(op.fs, "Rename cache record %s -> %s (err: %v)", srcKey, dstKey, err)
if err != nil {
nerr++
}
}
fs.Debugf(op.fs, "%d hashes moved, %d failed", len(items)-nerr, nerr)
return nil
}
func moveHash(b kv.Bucket, src, dst string) error {
data := b.Get([]byte(src))
err := b.Delete([]byte(src))
if err != nil || len(data) == 0 {
return err
}
return b.Put([]byte(dst), data)
}
// kvGet: get single hash from database
type kvGet struct {
key string
fp string
hash string
val string
age time.Duration
}
func (op *kvGet) Do(ctx context.Context, b kv.Bucket) error {
data := b.Get([]byte(op.key))
if len(data) == 0 {
return errors.New("no record")
}
var r hashRecord
if err := r.decode(op.key, data); err != nil {
return errors.New("invalid record")
}
if !(r.Fp == anyFingerprint || op.fp == anyFingerprint || r.Fp == op.fp) {
return errors.New("fingerprint changed")
}
if time.Since(r.Created) > op.age {
return errors.New("record timed out")
}
if r.Hashes != nil {
op.val = r.Hashes[op.hash]
}
return nil
}
// kvPut: set hashes for an object by key
type kvPut struct {
key string
fp string
hashes operations.HashSums
age time.Duration
}
func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
data := b.Get([]byte(op.key))
var r hashRecord
if len(data) > 0 {
err = r.decode(op.key, data)
if err != nil || r.Fp != op.fp || time.Since(r.Created) > op.age {
r.Hashes = nil
}
}
if len(r.Hashes) == 0 {
r.Created = time.Now()
r.Hashes = operations.HashSums{}
r.Fp = op.fp
}
for hashType, hashVal := range op.hashes {
r.Hashes[hashType] = hashVal
}
if data, err = r.encode(op.key); err != nil {
return fmt.Errorf("marshal failed: %w", err)
}
if err = b.Put([]byte(op.key), data); err != nil {
return fmt.Errorf("put failed: %w", err)
}
return err
}
// kvDump: dump the database.
// Note: a long dump can cause concurrent operations to fail.
type kvDump struct {
full bool
root string
path string
fs *Fs
num int
total int
}
func (op *kvDump) Do(ctx context.Context, b kv.Bucket) error {
f, baseRoot, dbPath := op.fs, op.root, op.path
if op.full {
total := 0
num := 0
_ = b.ForEach(func(bkey, data []byte) error {
total++
key := string(bkey)
include := (baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/"))
var r hashRecord
if err := r.decode(key, data); err != nil {
fs.Errorf(nil, "%s: invalid record: %v", key, err)
return nil
}
fmt.Println(f.dumpLine(&r, key, include, nil))
if include {
num++
}
return nil
})
fs.Infof(dbPath, "%d records out of %d", num, total)
op.num, op.total = num, total // for unit tests
return nil
}
num := 0
cur := b.Cursor()
var bkey, data []byte
if baseRoot != "" {
bkey, data = cur.Seek([]byte(baseRoot))
} else {
bkey, data = cur.First()
}
for bkey != nil {
key := string(bkey)
if !(baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/")) {
break
}
var r hashRecord
if err := r.decode(key, data); err != nil {
fs.Errorf(nil, "%s: invalid record: %v", key, err)
continue
}
if key = strings.TrimPrefix(key[len(baseRoot):], "/"); key == "" {
key = "/"
}
fmt.Println(f.dumpLine(&r, key, true, nil))
num++
bkey, data = cur.Next()
}
fs.Infof(dbPath, "%d records", num)
op.num = num // for unit tests
return nil
}
func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) string {
var status string
switch {
case !include:
status = "ext"
case err != nil:
status = "bad"
case r.Fp == anyFingerprint:
status = "stk"
default:
status = "ok "
}
var hashes []string
for _, hashType := range f.keepHashes.Array() {
hashName := hashType.String()
hashVal := r.Hashes[hashName]
if hashVal == "" || err != nil {
hashVal = "-"
}
hashVal = fmt.Sprintf("%-*s", hash.Width(hashType, false), hashVal)
hashes = append(hashes, hashName+":"+hashVal)
}
hashesStr := strings.Join(hashes, " ")
age := time.Since(r.Created).Round(time.Second)
if age > 24*time.Hour {
age = age.Round(time.Hour)
}
if err != nil {
age = 0
}
ageStr := age.String()
if strings.HasSuffix(ageStr, "h0m0s") {
ageStr = strings.TrimSuffix(ageStr, "0m0s")
}
return fmt.Sprintf("%s %s %9s %s", status, hashesStr, ageStr, path)
}

backend/hasher/object.go

@@ -0,0 +1,304 @@
package hasher
import (
"context"
"errors"
"fmt"
"io"
"path"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
)
// obtain hash for an object
func (o *Object) getHash(ctx context.Context, hashType hash.Type) (string, error) {
maxAge := time.Duration(o.f.opt.MaxAge)
if maxAge <= 0 {
return "", nil
}
fp := o.fingerprint(ctx)
if fp == "" {
return "", errors.New("fingerprint failed")
}
return o.f.getRawHash(ctx, hashType, o.Remote(), fp, maxAge)
}
// obtain hash for a path
func (f *Fs) getRawHash(ctx context.Context, hashType hash.Type, remote, fp string, age time.Duration) (string, error) {
key := path.Join(f.Fs.Root(), remote)
op := &kvGet{
key: key,
fp: fp,
hash: hashType.String(),
age: age,
}
err := f.db.Do(false, op)
return op.val, err
}
// put new hashes for an object
func (o *Object) putHashes(ctx context.Context, rawHashes hashMap) error {
if o.f.opt.MaxAge <= 0 {
return nil
}
fp := o.fingerprint(ctx)
if fp == "" {
return nil
}
key := path.Join(o.f.Fs.Root(), o.Remote())
hashes := operations.HashSums{}
for hashType, hashVal := range rawHashes {
hashes[hashType.String()] = hashVal
}
return o.f.putRawHashes(ctx, key, fp, hashes)
}
// set hashes for a path without any validation
func (f *Fs) putRawHashes(ctx context.Context, key, fp string, hashes operations.HashSums) error {
return f.db.Do(true, &kvPut{
key: key,
fp: fp,
hashes: hashes,
age: time.Duration(f.opt.MaxAge),
})
}
// Hash returns the selected checksum of the file or "" if unavailable.
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string, err error) {
f := o.f
if f.passHashes.Contains(hashType) {
fs.Debugf(o, "pass %s", hashType)
return o.Object.Hash(ctx, hashType)
}
if !f.suppHashes.Contains(hashType) {
fs.Debugf(o, "unsupp %s", hashType)
return "", hash.ErrUnsupported
}
if hashVal, err = o.getHash(ctx, hashType); err != nil {
fs.Debugf(o, "getHash: %v", err)
err = nil
hashVal = ""
}
if hashVal != "" {
fs.Debugf(o, "cached %s = %q", hashType, hashVal)
return hashVal, nil
}
if f.slowHashes.Contains(hashType) {
fs.Debugf(o, "slow %s", hashType)
hashVal, err = o.Object.Hash(ctx, hashType)
if err == nil && hashVal != "" && f.keepHashes.Contains(hashType) {
if err = o.putHashes(ctx, hashMap{hashType: hashVal}); err != nil {
fs.Debugf(o, "putHashes: %v", err)
err = nil
}
}
return hashVal, err
}
if f.autoHashes.Contains(hashType) && o.Size() < int64(f.opt.AutoSize) {
_ = o.updateHashes(ctx)
if hashVal, err = o.getHash(ctx, hashType); err != nil {
fs.Debugf(o, "auto %s = %q (%v)", hashType, hashVal, err)
err = nil
}
}
return hashVal, err
}
// updateHashes performs implicit "rclone hashsum --download" and updates cache.
func (o *Object) updateHashes(ctx context.Context) error {
r, err := o.Open(ctx)
if err != nil {
fs.Infof(o, "update failed (open): %v", err)
return err
}
defer func() {
_ = r.Close()
}()
if _, err = io.Copy(io.Discard, r); err != nil {
fs.Infof(o, "update failed (copy): %v", err)
return err
}
return nil
}
// Update the object with the given data, time and size.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
_ = o.f.pruneHash(src.Remote())
return o.Object.Update(ctx, in, src, options...)
}
// Remove an object.
func (o *Object) Remove(ctx context.Context) error {
_ = o.f.pruneHash(o.Remote())
return o.Object.Remove(ctx)
}
// SetModTime sets the modification time of the file.
// Also prunes the cache entry when modtime changes so that
// touching a file will trigger checksum recalculation even
// on backends that don't provide modTime with fingerprint.
func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
if mtime != o.Object.ModTime(ctx) {
_ = o.f.pruneHash(o.Remote())
}
return o.Object.SetModTime(ctx, mtime)
}
// Open opens the file for read.
// Full reads will also update object hashes.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadCloser, err error) {
size := o.Size()
var offset, limit int64 = 0, -1
for _, option := range options {
switch opt := option.(type) {
case *fs.SeekOption:
offset = opt.Offset
case *fs.RangeOption:
offset, limit = opt.Decode(size)
}
}
if offset < 0 {
return nil, errors.New("invalid offset")
}
if limit < 0 {
limit = size - offset
}
if r, err = o.Object.Open(ctx, options...); err != nil {
return nil, err
}
if offset != 0 || limit < size {
// It's a partial read
return r, err
}
return o.f.newHashingReader(ctx, r, func(sums hashMap) {
if err := o.putHashes(ctx, sums); err != nil {
fs.Infof(o, "auto hashing error: %v", err)
}
})
}
// Put data into the remote path with given modTime and size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
var (
o fs.Object
common hash.Set
rehash bool
hashes hashMap
)
if fsrc := src.Fs(); fsrc != nil {
common = fsrc.Hashes().Overlap(f.keepHashes)
// Rehash if source does not have all required hashes or hashing is slow
rehash = fsrc.Features().SlowHash || common != f.keepHashes
}
wrapIn := in
if rehash {
r, err := f.newHashingReader(ctx, in, func(sums hashMap) {
hashes = sums
})
fs.Debugf(src, "Rehash in-fly due to incomplete or slow source set %v (err: %v)", common, err)
if err == nil {
wrapIn = r
} else {
rehash = false
}
}
_ = f.pruneHash(src.Remote())
oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
o, err = f.wrapObject(oResult, err)
if err != nil {
return nil, err
}
if !rehash {
hashes = hashMap{}
for _, ht := range common.Array() {
if h, e := src.Hash(ctx, ht); e == nil && h != "" {
hashes[ht] = h
}
}
}
if len(hashes) > 0 {
err := o.(*Object).putHashes(ctx, hashes)
fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
}
return o, err
}
type hashingReader struct {
rd io.Reader
hasher *hash.MultiHasher
fun func(hashMap)
}
func (f *Fs) newHashingReader(ctx context.Context, rd io.Reader, fun func(hashMap)) (*hashingReader, error) {
hasher, err := hash.NewMultiHasherTypes(f.keepHashes)
if err != nil {
return nil, err
}
hr := &hashingReader{
rd: rd,
hasher: hasher,
fun: fun,
}
return hr, nil
}
func (r *hashingReader) Read(p []byte) (n int, err error) {
n, err = r.rd.Read(p)
if err != nil && err != io.EOF {
r.hasher = nil
}
if r.hasher != nil {
if _, errHash := r.hasher.Write(p[:n]); errHash != nil {
r.hasher = nil
err = errHash
}
}
if err == io.EOF && r.hasher != nil {
r.fun(r.hasher.Sums())
r.hasher = nil
}
return
}
func (r *hashingReader) Close() error {
if rc, ok := r.rd.(io.ReadCloser); ok {
return rc.Close()
}
return nil
}
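The same read-through hashing pattern can be reproduced with rclone's exported hash helpers; a minimal standalone sketch (not the hasher backend itself, sample input made up):

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/rclone/rclone/fs/hash"
)

func main() {
	hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(hash.MD5, hash.SHA1))
	if err != nil {
		panic(err)
	}
	src := strings.NewReader("hello hasher")
	// Read the stream to its consumer (here io.Discard) while feeding a
	// copy of every byte to the multi-hasher, as hashingReader does above.
	if _, err := io.Copy(io.Discard, io.TeeReader(src, hasher)); err != nil {
		panic(err)
	}
	for hashType, sum := range hasher.Sums() {
		fmt.Printf("%v: %s\n", hashType, sum)
	}
}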
// Return object fingerprint or empty string in case of errors
//
// Note that we can't use the generic `fs.Fingerprint` here because
// this fingerprint is used to pick _derived hashes_ that are slow
// to calculate or completely unsupported by the base remote.
//
// The hasher fingerprint must be based on `fsHash`, the first _fast_
// hash supported _by the underlying remote_ (if there is one),
// while `fs.Fingerprint` would select a hash _produced by hasher_
// creating unresolvable fingerprint loop.
func (o *Object) fingerprint(ctx context.Context) string {
size := o.Object.Size()
timeStr := "-"
if o.f.fpTime {
timeStr = o.Object.ModTime(ctx).UTC().Format(timeFormat)
if timeStr == "" {
return ""
}
}
hashStr := "-"
if o.f.fpHash != hash.None {
var err error
hashStr, err = o.Object.Hash(ctx, o.f.fpHash)
if hashStr == "" || err != nil {
return ""
}
}
return fmt.Sprintf("%d,%s,%s", size, timeStr, hashStr)
}
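For reference, the fingerprint is a plain string of the form `<size>,<mtime>,<hash>`; a minimal sketch with made-up values:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Same layout as the format string above; "-" stands for a disabled component.
	const timeFormat = "2006-01-02T15:04:05.000000000-0700"
	mtime := time.Date(2001, 2, 3, 4, 5, 6, 499999999, time.UTC)
	fmt.Printf("%d,%s,%s\n", 12, mtime.Format(timeFormat), "-")
	// Output: 12,2001-02-03T04:05:06.499999999+0000,-
}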


@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9
package hdfs
@@ -91,7 +92,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.ServicePrincipalName != "" {
options.KerberosClient, err = getKerberosClient()
if err != nil {
return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
return nil, fmt.Errorf("problem with kerberos authentication: %w", err)
}
options.KerberosServicePrincipleName = opt.ServicePrincipalName
@@ -262,6 +263,98 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.client.RemoveAll(realpath)
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Get the real paths from the remote specs:
sourcePath := srcObj.fs.realpath(srcObj.remote)
targetPath := f.realpath(remote)
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
// Make sure the target folder exists:
dirname := path.Dir(targetPath)
err := f.client.MkdirAll(dirname, 0755)
if err != nil {
return nil, err
}
// Do the move
// Note that the underlying HDFS library hard-codes Overwrite=True, but this is expected rclone behaviour.
err = f.client.Rename(sourcePath, targetPath)
if err != nil {
return nil, err
}
// Look up the resulting object
info, err := f.client.Stat(targetPath)
if err != nil {
return nil, err
}
// And return it:
return &Object{
fs: f,
remote: remote,
size: info.Size(),
modTime: info.ModTime(),
}, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
srcFs, ok := src.(*Fs)
if !ok {
return fs.ErrorCantDirMove
}
// Get the real paths from the remote specs:
sourcePath := srcFs.realpath(srcRemote)
targetPath := f.realpath(dstRemote)
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
// Check if the destination exists:
info, err := f.client.Stat(targetPath)
if err == nil {
fs.Debugf(f, "target directory already exits, IsDir = [%t]", info.IsDir())
return fs.ErrorDirExists
}
// Make sure the target's parent folder exists:
dirname := path.Dir(targetPath)
err = f.client.MkdirAll(dirname, 0755)
if err != nil {
return err
}
// Do the move
err = f.client.Rename(sourcePath, targetPath)
if err != nil {
return err
}
return nil
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := f.client.StatFs()
@@ -317,4 +410,6 @@ var (
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
)


@@ -1,5 +1,7 @@
//go:build !plan9
// +build !plan9
// Package hdfs provides an interface to the HDFS storage system.
package hdfs
import (
@@ -18,41 +20,31 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "namenode",
Help: "hadoop name node and port",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
Examples: []fs.OptionExample{{
Value: "namenode:8020",
Help: "Connect to host namenode at port 8020",
}},
}, {
Name: "username",
Help: "hadoop user name",
Required: false,
Name: "username",
Help: "Hadoop user name.",
Examples: []fs.OptionExample{{
Value: "root",
Help: "Connect to hdfs as root",
Help: "Connect to hdfs as root.",
}},
}, {
Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode
Help: `Kerberos service principal name for the namenode.
Enables KERBEROS authentication. Specifies the Service Principal Name
(<SERVICE>/<FQDN>) for the namenode.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "hdfs/namenode.hadoop.docker",
Help: "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
}},
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Advanced: true,
}, {
Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy
Help: `Kerberos data transfer protection: authentication|integrity|privacy.
Specifies whether or not authentication, data signature integrity
checks, and wire encryption is required when communicating the the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.",


@@ -1,5 +1,6 @@
// Test HDFS filesystem interface
//go:build !plan9
// +build !plan9
package hdfs_test


@@ -1,6 +1,7 @@
// Build for hdfs for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9
// +build plan9
package hdfs


@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9
package hdfs
@@ -114,7 +115,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
info, err := o.fs.client.Stat(realpath)
_, err = o.fs.client.Stat(realpath)
if err == nil {
err = o.fs.client.Remove(realpath)
if err != nil {
@@ -146,7 +147,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
info, err = o.fs.client.Stat(realpath)
info, err := o.fs.client.Stat(realpath)
if err != nil {
return err
}


@@ -0,0 +1,81 @@
package api
import (
"encoding/json"
"net/url"
"path"
"strings"
"time"
)
// Some presets for different amounts of information that can be requested for fields;
// it is recommended to only request the information that is actually needed.
var (
HiDriveObjectNoMetadataFields = []string{"name", "type"}
HiDriveObjectWithMetadataFields = append(HiDriveObjectNoMetadataFields, "id", "size", "mtime", "chash")
HiDriveObjectWithDirectoryMetadataFields = append(HiDriveObjectWithMetadataFields, "nmembers")
DirectoryContentFields = []string{"nmembers"}
)
// QueryParameters represents the parameters passed to an API-call.
type QueryParameters struct {
url.Values
}
// NewQueryParameters initializes an instance of QueryParameters and
// returns a pointer to it.
func NewQueryParameters() *QueryParameters {
return &QueryParameters{url.Values{}}
}
// SetFileInDirectory sets the appropriate parameters
// to specify a path to a file in a directory.
// This is used by requests that work with paths for files that do not exist yet.
// (For example when creating a file).
// Most requests use the format produced by SetPath(...).
func (p *QueryParameters) SetFileInDirectory(filePath string) {
directory, file := path.Split(path.Clean(filePath))
p.Set("dir", path.Clean(directory))
p.Set("name", file)
// NOTE: It would be possible to switch to pid-based requests
// by modifying this function.
}
// SetPath sets the appropriate parameters to access the given path.
func (p *QueryParameters) SetPath(objectPath string) {
p.Set("path", path.Clean(objectPath))
// NOTE: It would be possible to switch to pid-based requests
// by modifying this function.
}
// SetTime sets the key to the time-value. It replaces any existing values.
func (p *QueryParameters) SetTime(key string, value time.Time) error {
valueAPI := Time(value)
valueBytes, err := json.Marshal(&valueAPI)
if err != nil {
return err
}
p.Set(key, string(valueBytes))
return nil
}
// AddList adds the given values as a list
// with each value separated by the separator.
// It appends to any existing values associated with key.
func (p *QueryParameters) AddList(key string, separator string, values ...string) {
original := p.Get(key)
p.Set(key, strings.Join(values, separator))
if original != "" {
p.Set(key, original+separator+p.Get(key))
}
}
// AddFields sets the appropriate parameter to access the given fields.
// The given fields will be appended to any other existing fields.
func (p *QueryParameters) AddFields(prefix string, fields ...string) {
modifiedFields := make([]string, len(fields))
for i, field := range fields {
modifiedFields[i] = prefix + field
}
p.AddList("fields", ",", modifiedFields...)
}
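A small standalone sketch of how these helpers compose into a query string (the path and field choices are illustrative only):

package main

import (
	"fmt"

	"github.com/rclone/rclone/backend/hidrive/api"
)

func main() {
	params := api.NewQueryParameters()
	params.SetPath("/users/example/some dir")
	params.AddFields("members.", api.HiDriveObjectWithMetadataFields...)
	params.AddFields("", api.DirectoryContentFields...)
	params.Set("members", "all")
	// Set and Encode come from the embedded url.Values.
	fmt.Println(params.Encode())
}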


@@ -0,0 +1,135 @@
// Package api has type definitions and code related to API-calls for the HiDrive-API.
package api
import (
"encoding/json"
"fmt"
"net/url"
"strconv"
"time"
)
// Time represents date and time information for the API.
type Time time.Time
// MarshalJSON turns Time into JSON (in Unix-time/UTC).
func (t *Time) MarshalJSON() ([]byte, error) {
secs := time.Time(*t).Unix()
return []byte(strconv.FormatInt(secs, 10)), nil
}
// UnmarshalJSON turns JSON into Time.
func (t *Time) UnmarshalJSON(data []byte) error {
secs, err := strconv.ParseInt(string(data), 10, 64)
if err != nil {
return err
}
*t = Time(time.Unix(secs, 0))
return nil
}
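A standalone round-trip sketch showing that api.Time marshals to Unix seconds (illustrative values only):

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/rclone/rclone/backend/hidrive/api"
)

func main() {
	t := api.Time(time.Date(2022, 6, 1, 12, 0, 0, 0, time.UTC))
	data, err := json.Marshal(&t)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // 1654084800

	var back api.Time
	if err := json.Unmarshal(data, &back); err != nil {
		panic(err)
	}
	fmt.Println(time.Time(back).UTC()) // 2022-06-01 12:00:00 +0000 UTC
}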
// Error is returned from the API when things go wrong.
type Error struct {
Code json.Number `json:"code"`
ContextInfo json.RawMessage
Message string `json:"msg"`
}
// Error returns a string for the error and satisfies the error interface.
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q", e.Code.String())
if e.Message != "" {
out += ": " + e.Message
}
if e.ContextInfo != nil {
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
}
return out
}
// Check Error satisfies the error interface.
var _ error = (*Error)(nil)
// possible types for HiDriveObject
const (
HiDriveObjectTypeDirectory = "dir"
HiDriveObjectTypeFile = "file"
HiDriveObjectTypeSymlink = "symlink"
)
// HiDriveObject describes a folder, a symlink or a file.
// Depending on the type and content, not all fields are present.
type HiDriveObject struct {
Type string `json:"type"`
ID string `json:"id"`
ParentID string `json:"parent_id"`
Name string `json:"name"`
Path string `json:"path"`
Size int64 `json:"size"`
MemberCount int64 `json:"nmembers"`
ModifiedAt Time `json:"mtime"`
ChangedAt Time `json:"ctime"`
MetaHash string `json:"mhash"`
MetaOnlyHash string `json:"mohash"`
NameHash string `json:"nhash"`
ContentHash string `json:"chash"`
IsTeamfolder bool `json:"teamfolder"`
Readable bool `json:"readable"`
Writable bool `json:"writable"`
Shareable bool `json:"shareable"`
MIMEType string `json:"mime_type"`
}
// ModTime returns the modification time of the HiDriveObject.
func (i *HiDriveObject) ModTime() time.Time {
t := time.Time(i.ModifiedAt)
if t.IsZero() {
t = time.Time(i.ChangedAt)
}
return t
}
// UnmarshalJSON turns JSON into HiDriveObject and
// introduces specific default-values where necessary.
func (i *HiDriveObject) UnmarshalJSON(data []byte) error {
type objectAlias HiDriveObject
defaultObject := objectAlias{
Size: -1,
MemberCount: -1,
}
err := json.Unmarshal(data, &defaultObject)
if err != nil {
return err
}
name, err := url.PathUnescape(defaultObject.Name)
if err == nil {
defaultObject.Name = name
}
*i = HiDriveObject(defaultObject)
return nil
}
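A short standalone sketch of the default-value behaviour: fields the API did not send keep their explicit defaults (Size and MemberCount stay -1 rather than 0), and percent-encoded names are unescaped (sample JSON made up):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/hidrive/api"
)

func main() {
	var obj api.HiDriveObject
	if err := json.Unmarshal([]byte(`{"type":"dir","name":"some%20dir"}`), &obj); err != nil {
		panic(err)
	}
	fmt.Println(obj.Type, obj.Name, obj.Size, obj.MemberCount) // dir some dir -1 -1
}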
// DirectoryContent describes the content of a directory.
type DirectoryContent struct {
TotalCount int64 `json:"nmembers"`
Entries []HiDriveObject `json:"members"`
}
// UnmarshalJSON turns JSON into DirectoryContent and
// introduces specific default-values where necessary.
func (d *DirectoryContent) UnmarshalJSON(data []byte) error {
type directoryContentAlias DirectoryContent
defaultDirectoryContent := directoryContentAlias{
TotalCount: -1,
}
err := json.Unmarshal(data, &defaultDirectoryContent)
if err != nil {
return err
}
*d = DirectoryContent(defaultDirectoryContent)
return nil
}

backend/hidrive/helpers.go

@@ -0,0 +1,888 @@
package hidrive
// This file is for helper-functions which may provide more general and
// specialized functionality than the generic interfaces.
// There are two sections:
// 1. methods bound to Fs
// 2. other functions independent from Fs used throughout the package
// NOTE: Functions accessing paths expect any relative paths
// to be resolved prior to execution with resolvePath(...).
import (
"bytes"
"context"
"errors"
"io"
"net/http"
"path"
"strconv"
"sync"
"time"
"github.com/rclone/rclone/backend/hidrive/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/ranges"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
const (
// MaximumUploadBytes represents the maximum amount of bytes
// a single upload-operation will support.
MaximumUploadBytes = 2147483647 // = 2GiB - 1
// iterationChunkSize represents the chunk size used to iterate directory contents.
iterationChunkSize = 5000
)
var (
// retryErrorCodes is a slice of error codes that we will always retry.
retryErrorCodes = []int{
429, // Too Many Requests
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// ErrorFileExists is returned when a query tries to create a file
// that already exists.
ErrorFileExists = errors.New("destination file already exists")
)
// MemberType represents the possible types of entries a directory can contain.
type MemberType string
// possible values for MemberType
const (
AllMembers MemberType = "all"
NoMembers MemberType = "none"
DirectoryMembers MemberType = api.HiDriveObjectTypeDirectory
FileMembers MemberType = api.HiDriveObjectTypeFile
SymlinkMembers MemberType = api.HiDriveObjectTypeSymlink
)
// SortByField represents possible fields to sort entries of a directory by.
type SortByField string
// possible values for SortByField
const (
descendingSort string = "-"
SortByName SortByField = "name"
SortByModTime SortByField = "mtime"
SortByObjectType SortByField = "type"
SortBySize SortByField = "size"
SortByNameDescending SortByField = SortByField(descendingSort) + SortByName
SortByModTimeDescending SortByField = SortByField(descendingSort) + SortByModTime
SortByObjectTypeDescending SortByField = SortByField(descendingSort) + SortByObjectType
SortBySizeDescending SortByField = SortByField(descendingSort) + SortBySize
)
var (
// Unsorted disables sorting and can therefore not be combined with other values.
Unsorted = []SortByField{"none"}
// DefaultSorted does not specify how to sort and
// therefore implies the default sort order.
DefaultSorted = []SortByField{}
)
// CopyOrMoveOperationType represents the possible types of copy- and move-operations.
type CopyOrMoveOperationType int
// possible values for CopyOrMoveOperationType
const (
MoveOriginal CopyOrMoveOperationType = iota
CopyOriginal
CopyOriginalPreserveModTime
)
// OnExistAction represents possible actions the API should take,
// when a request tries to create a path that already exists.
type OnExistAction string
// possible values for OnExistAction
const (
// IgnoreOnExist instructs the API not to execute
// the request in case of a conflict, but to return an error.
IgnoreOnExist OnExistAction = "ignore"
// AutoNameOnExist instructs the API to automatically rename
// any conflicting request-objects.
AutoNameOnExist OnExistAction = "autoname"
// OverwriteOnExist instructs the API to overwrite any conflicting files.
// This can only be used, if the request operates on files directly.
// (For example when moving/copying a file.)
// For most requests this action will simply be ignored.
OverwriteOnExist OnExistAction = "overwrite"
)
// shouldRetry returns a boolean as to whether this resp and err deserve to be retried.
// It tries to expire/invalidate the token, if necessary.
// It returns the err as a convenience.
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if resp != nil && (resp.StatusCode == 401 || isHTTPError(err, 401)) && len(resp.Header["Www-Authenticate"]) > 0 {
fs.Debugf(f, "Token might be invalid: %v", err)
if f.tokenRenewer != nil {
iErr := f.tokenRenewer.Expire()
if iErr == nil {
return true, err
}
}
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// resolvePath resolves the given (relative) path and
// returns a path suitable for API-calls.
// This will consider the root-path of the fs and any needed prefixes.
//
// Any relative paths passed to functions that access these paths should
// be resolved with this first!
func (f *Fs) resolvePath(objectPath string) string {
resolved := path.Join(f.opt.RootPrefix, f.root, f.opt.Enc.FromStandardPath(objectPath))
return resolved
}
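The resolved path is just a cleaned join of the root prefix, the Fs root and the encoded relative path; a trivial standalone sketch with made-up values:

package main

import (
	"fmt"
	"path"
)

func main() {
	// Sketch of what resolvePath produces, assuming a root prefix of "root",
	// an Fs root of "backups" and no special characters to encode
	// (all values here are made up for illustration).
	rootPrefix, fsRoot, objectPath := "root", "backups", "photos/2022"
	fmt.Println(path.Join(rootPrefix, fsRoot, objectPath)) // root/backups/photos/2022
}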
// iterateOverDirectory calls the given function callback
// on each item found in a given directory.
//
// If callback ever returns true then this exits early with found = true.
func (f *Fs) iterateOverDirectory(ctx context.Context, directory string, searchOnly MemberType, callback func(*api.HiDriveObject) bool, fields []string, sortBy []SortByField) (found bool, err error) {
parameters := api.NewQueryParameters()
parameters.SetPath(directory)
parameters.AddFields("members.", fields...)
parameters.AddFields("", api.DirectoryContentFields...)
parameters.Set("members", string(searchOnly))
for _, v := range sortBy {
// The explicit conversion is necessary for each element.
parameters.AddList("sort", ",", string(v))
}
opts := rest.Opts{
Method: "GET",
Path: "/dir",
Parameters: parameters.Values,
}
iterateContent := func(result *api.DirectoryContent, err error) (bool, error) {
if err != nil {
return false, err
}
for _, item := range result.Entries {
item.Name = f.opt.Enc.ToStandardName(item.Name)
if callback(&item) {
return true, nil
}
}
return false, nil
}
return f.paginateDirectoryAccess(ctx, &opts, iterationChunkSize, 0, iterateContent)
}
// paginateDirectoryAccess executes requests specified via ctx and opts
// which should produce api.DirectoryContent.
// This will paginate the requests using limit starting at the given offset.
//
// The given function callback is called on each api.DirectoryContent found
// along with any errors that occurred.
// If callback ever returns true then this exits early with found = true.
// If callback ever returns an error then this exits early with that error.
func (f *Fs) paginateDirectoryAccess(ctx context.Context, opts *rest.Opts, limit int64, offset int64, callback func(*api.DirectoryContent, error) (bool, error)) (found bool, err error) {
for {
opts.Parameters.Set("limit", strconv.FormatInt(offset, 10)+","+strconv.FormatInt(limit, 10))
var result api.DirectoryContent
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
found, err = callback(&result, err)
if found || err != nil {
return found, err
}
offset += int64(len(result.Entries))
if offset >= result.TotalCount || limit > int64(len(result.Entries)) {
break
}
}
return false, nil
}
// fetchMetadataForPath reads the metadata from the path.
func (f *Fs) fetchMetadataForPath(ctx context.Context, path string, fields []string) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetPath(path)
parameters.AddFields("", fields...)
opts := rest.Opts{
Method: "GET",
Path: "/meta",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
return &result, nil
}
// copyOrMove copies or moves a directory or file
// from the source-path to the destination-path.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: Use the explicit methods instead of directly invoking this method.
// (Those are: copyDirectory, moveDirectory, copyFile, moveFile.)
func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType CopyOrMoveOperationType, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.Set("src", source)
parameters.Set("dst", destination)
if onExist == AutoNameOnExist ||
(onExist == OverwriteOnExist && !isDirectory) {
parameters.Set("on_exist", string(onExist))
}
endpoint := "/"
if isDirectory {
endpoint += "dir"
} else {
endpoint += "file"
}
switch operationType {
case MoveOriginal:
endpoint += "/move"
case CopyOriginalPreserveModTime:
parameters.Set("preserve_mtime", strconv.FormatBool(true))
fallthrough
case CopyOriginal:
endpoint += "/copy"
}
opts := rest.Opts{
Method: "POST",
Path: endpoint,
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
return &result, nil
}
// copyDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
}
// moveDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) moveDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, true, MoveOriginal, source, destination, onExist)
}
// copyFile copies the file at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: This operation will expand sparse areas in the content of the source-file
// to blocks of 0-bytes in the destination-file.
func (f *Fs) copyFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, false, CopyOriginalPreserveModTime, source, destination, onExist)
}
// moveFile moves the file at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: This operation may expand sparse areas in the content of the source-file
// to blocks of 0-bytes in the destination-file.
func (f *Fs) moveFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, false, MoveOriginal, source, destination, onExist)
}
// createDirectory creates the directory at the given path and
// returns the resulting api-object if successful.
//
// The directory will only be created if its parent-directory exists.
// This returns fs.ErrorDirNotFound if the parent-directory is not found.
// This returns fs.ErrorDirExists if the directory already exists.
func (f *Fs) createDirectory(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetPath(directory)
if onExist == AutoNameOnExist {
parameters.Set("on_exist", string(onExist))
}
opts := rest.Opts{
Method: "POST",
Path: "/dir",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
switch {
case err == nil:
return &result, nil
case isHTTPError(err, 404):
return nil, fs.ErrorDirNotFound
case isHTTPError(err, 409):
return nil, fs.ErrorDirExists
}
return nil, err
}
// createDirectories creates the directory at the given path
// along with any missing parent directories and
// returns the resulting api-object (of the created directory) if successful.
//
// This returns fs.ErrorDirExists if the directory already exists.
//
// If an error occurs while the parent directories are being created,
// any directories already created will NOT be deleted again.
func (f *Fs) createDirectories(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
result, err := f.createDirectory(ctx, directory, onExist)
if err == nil {
return result, nil
}
if err != fs.ErrorDirNotFound {
return nil, err
}
parentDirectory := path.Dir(directory)
_, err = f.createDirectories(ctx, parentDirectory, onExist)
if err != nil && err != fs.ErrorDirExists {
return nil, err
}
// NOTE: Ignoring fs.ErrorDirExists does no harm,
// since it does not mean the child directory cannot be created.
return f.createDirectory(ctx, directory, onExist)
}
// deleteDirectory deletes the directory at the given path.
//
// If recursive is false, the directory will only be deleted if it is empty.
// If recursive is true, the directory will be deleted regardless of its content.
// This returns fs.ErrorDirNotFound if the directory is not found.
// This returns fs.ErrorDirectoryNotEmpty if the directory is not empty and
// recursive is false.
func (f *Fs) deleteDirectory(ctx context.Context, directory string, recursive bool) error {
parameters := api.NewQueryParameters()
parameters.SetPath(directory)
parameters.Set("recursive", strconv.FormatBool(recursive))
opts := rest.Opts{
Method: "DELETE",
Path: "/dir",
Parameters: parameters.Values,
NoResponse: true,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
})
switch {
case isHTTPError(err, 404):
return fs.ErrorDirNotFound
case isHTTPError(err, 409):
return fs.ErrorDirectoryNotEmpty
}
return err
}
// deleteObject deletes the object/file at the given path.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) deleteObject(ctx context.Context, path string) error {
parameters := api.NewQueryParameters()
parameters.SetPath(path)
opts := rest.Opts{
Method: "DELETE",
Path: "/file",
Parameters: parameters.Values,
NoResponse: true,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
})
if isHTTPError(err, 404) {
return fs.ErrorObjectNotFound
}
return err
}
// createFile creates a file at the given path
// with the content of the io.ReadSeeker.
// This guarantees that existing files will not be overwritten.
// The maximum size of the content is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// This returns fs.ErrorDirNotFound
// if the parent directory of the file is not found.
// This returns ErrorFileExists if a file already exists at the specified path.
func (f *Fs) createFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time, onExist OnExistAction) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetFileInDirectory(path)
if onExist == AutoNameOnExist {
parameters.Set("on_exist", string(onExist))
}
var err error
if !modTime.IsZero() {
err = parameters.SetTime("mtime", modTime)
if err != nil {
return nil, err
}
}
opts := rest.Opts{
Method: "POST",
Path: "/file",
Body: content,
ContentType: "application/octet-stream",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
// Reset the reading index (in case this is a retry).
if _, err = content.Seek(0, io.SeekStart); err != nil {
return false, err
}
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
switch {
case err == nil:
return &result, nil
case isHTTPError(err, 404):
return nil, fs.ErrorDirNotFound
case isHTTPError(err, 409):
return nil, ErrorFileExists
}
return nil, err
}
// overwriteFile updates the content of the file at the given path
// with the content of the io.ReadSeeker.
// If the file does not exist it will be created.
// The maximum size of the content is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// This returns fs.ErrorDirNotFound
// if the parent directory of the file is not found.
func (f *Fs) overwriteFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetFileInDirectory(path)
var err error
if !modTime.IsZero() {
err = parameters.SetTime("mtime", modTime)
if err != nil {
return nil, err
}
}
opts := rest.Opts{
Method: "PUT",
Path: "/file",
Body: content,
ContentType: "application/octet-stream",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
// Reset the reading index (in case this is a retry).
if _, err = content.Seek(0, io.SeekStart); err != nil {
return false, err
}
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
switch {
case err == nil:
return &result, nil
case isHTTPError(err, 404):
return nil, fs.ErrorDirNotFound
}
return nil, err
}
// uploadFileChunked updates the content of the existing file at the given path
// with the content of the io.Reader.
// Returns the position of the last successfully written byte, stopping before the first failed write.
// If nothing was written this will be 0.
// Returns the resulting api-object if successful.
//
// Replaces the file contents by uploading multiple chunks of the given size in parallel.
// Therefore this can be used to upload files of any size efficiently.
// The number of parallel transfers is limited by transferLimit which should be larger than 0.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: This method uses updateFileChunked and may create sparse files,
// if the upload of a chunk fails unexpectedly.
// See note about sparse files in patchFile.
// If any of the uploads fail, the process will be aborted and
// the first error that occurred will be returned.
// This is not an atomic operation,
// therefore if the upload fails the file may be partially modified.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) uploadFileChunked(ctx context.Context, path string, content io.Reader, modTime time.Time, chunkSize int, transferLimit int64) (okSize uint64, info *api.HiDriveObject, err error) {
okSize, err = f.updateFileChunked(ctx, path, content, 0, chunkSize, transferLimit)
if err == nil {
info, err = f.resizeFile(ctx, path, okSize, modTime)
}
return okSize, info, err
}
// updateFileChunked updates the content of the existing file at the given path
// starting at the given offset.
// Returns the position of the last successfully written byte, stopping before the first failed write.
// If nothing was written this will be 0.
//
// Replaces the file contents starting from the given byte offset
// with the content of the io.Reader.
// If the offset is beyond the file end, the file is extended up to the offset.
//
// The upload is done in multiple chunks of the given size in parallel.
// Therefore this can be used to upload files of any size efficiently.
// The number of parallel transfers is limited by transferLimit which should be larger than 0.
//
// NOTE: Because it is inefficient to set the modification time with every chunk,
// setting it to a specific value must be done in a separate request
// after this operation finishes.
//
// NOTE: This method uses patchFile and may create sparse files,
// especially if the upload of a chunk fails unexpectedly.
// See note about sparse files in patchFile.
// If any of the uploads fail, the process will be aborted and
// the first error that occurred will be returned.
// This is not an atomic operation,
// therefore if the upload fails the file may be partially modified.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) updateFileChunked(ctx context.Context, path string, content io.Reader, offset uint64, chunkSize int, transferLimit int64) (okSize uint64, err error) {
var (
okChunksMu sync.Mutex // protects the variables below
okChunks []ranges.Range
)
g, gCtx := errgroup.WithContext(ctx)
transferSemaphore := semaphore.NewWeighted(transferLimit)
var readErr error
startMoreTransfers := true
zeroTime := time.Time{}
for chunk := uint64(0); startMoreTransfers; chunk++ {
// Acquire semaphore to limit number of transfers in parallel.
readErr = transferSemaphore.Acquire(gCtx, 1)
if readErr != nil {
break
}
// Read a chunk of data.
chunkReader, bytesRead, readErr := readerForChunk(content, chunkSize)
if bytesRead < chunkSize {
startMoreTransfers = false
}
if readErr != nil || bytesRead <= 0 {
break
}
// Transfer the chunk.
chunkOffset := uint64(chunkSize)*chunk + offset
g.Go(func() error {
// After this upload is done,
// signal that another transfer can be started.
defer transferSemaphore.Release(1)
uploadErr := f.patchFile(gCtx, path, cachedReader(chunkReader), chunkOffset, zeroTime)
if uploadErr == nil {
// Remember successfully written chunks.
okChunksMu.Lock()
okChunks = append(okChunks, ranges.Range{Pos: int64(chunkOffset), Size: int64(bytesRead)})
okChunksMu.Unlock()
fs.Debugf(f, "Done uploading chunk of size %v at offset %v.", bytesRead, chunkOffset)
} else {
fs.Infof(f, "Error while uploading chunk at offset %v. Error is %v.", chunkOffset, uploadErr)
}
return uploadErr
})
}
if readErr != nil {
// Log the error in case it is later ignored because of an upload-error.
fs.Infof(f, "Error while reading/preparing to upload a chunk. Error is %v.", readErr)
}
err = g.Wait()
// Compute the first continuous range of the file content,
// which does not contain any failed chunks.
// Do not forget to add the file content up to the starting offset,
// which is presumed to be already correct.
rs := ranges.Ranges{}
rs.Insert(ranges.Range{Pos: 0, Size: int64(offset)})
for _, chunkRange := range okChunks {
rs.Insert(chunkRange)
}
if len(rs) > 0 && rs[0].Pos == 0 {
okSize = uint64(rs[0].Size)
}
if err != nil {
return okSize, err
}
if readErr != nil {
return okSize, readErr
}
return okSize, nil
}
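// A minimal sketch of the bounded-parallelism pattern used above, in isolation
// (nextChunk and upload are hypothetical placeholders, not part of this backend):
//
//	g, gCtx := errgroup.WithContext(ctx)
//	sem := semaphore.NewWeighted(transferLimit)
//	for {
//		if err := sem.Acquire(gCtx, 1); err != nil {
//			break // gCtx is cancelled once any upload has failed
//		}
//		chunk, ok := nextChunk()
//		if !ok {
//			sem.Release(1)
//			break
//		}
//		g.Go(func() error {
//			defer sem.Release(1)
//			return upload(gCtx, chunk)
//		})
//	}
//	err := g.Wait() // the first non-nil error returned by any upload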
// patchFile updates the content of the existing file at the given path
// starting at the given offset.
//
// Replaces the file contents starting from the given byte offset
// with the content of the io.ReadSeeker.
// If the offset is beyond the file end, the file is extended up to the offset.
// The maximum size of the update is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: By extending the file up to the offset this may create sparse files,
// which allocate less space on the file system than their apparent size indicates,
// since holes between data chunks are "real" holes
// and not regions made up of consecutive 0-bytes.
// Subsequent operations (such as copying data)
// usually expand the holes into regions of 0-bytes.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) patchFile(ctx context.Context, path string, content io.ReadSeeker, offset uint64, modTime time.Time) error {
parameters := api.NewQueryParameters()
parameters.SetPath(path)
parameters.Set("offset", strconv.FormatUint(offset, 10))
if !modTime.IsZero() {
err := parameters.SetTime("mtime", modTime)
if err != nil {
return err
}
}
opts := rest.Opts{
Method: "PATCH",
Path: "/file",
Body: content,
ContentType: "application/octet-stream",
Parameters: parameters.Values,
NoResponse: true,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
// Reset the reading index (in case this is a retry).
_, err = content.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
resp, err = f.srv.Call(ctx, &opts)
if isHTTPError(err, 423) {
return true, err
}
return f.shouldRetry(ctx, resp, err)
})
if isHTTPError(err, 404) {
return fs.ErrorObjectNotFound
}
return err
}
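// Illustratively, patchFile(ctx, p, r, 4096, time.Time{}) results in a request
// of roughly this shape (the "path" parameter name is assumed from SetPath,
// and the offset value is just an example):
//
//	PATCH /file?path=<p>&offset=4096
//	Content-Type: application/octet-stream
//
//	<chunk bytes read from r>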
// resizeFile updates the existing file at the given path to be of the given size
// and returns the resulting api-object if successful.
//
// If the given size is smaller than the current filesize,
// the file is cut/truncated at that position.
// If the given size is larger, the file is extended up to that position.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: By extending the file this may create sparse files,
// which allocate less space on the file system than their apparent size indicates,
// since holes between data chunks are "real" holes
// and not regions made up of consecutive 0-bytes.
// Subsequent operations (such as copying data)
// usually expand the holes into regions of 0-bytes.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) resizeFile(ctx context.Context, path string, size uint64, modTime time.Time) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetPath(path)
parameters.Set("size", strconv.FormatUint(size, 10))
if !modTime.IsZero() {
err := parameters.SetTime("mtime", modTime)
if err != nil {
return nil, err
}
}
opts := rest.Opts{
Method: "POST",
Path: "/file/truncate",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
switch {
case err == nil:
return &result, nil
case isHTTPError(err, 404):
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
// ------------------------------------------------------------
// isHTTPError compares the numerical status code
// of an api.Error to the given HTTP status.
//
// If the given error is not an api.Error or
// a numerical status code could not be determined, this returns false.
// Otherwise this returns whether the status code of the error is equal to the given status.
func isHTTPError(err error, status int64) bool {
if apiErr, ok := err.(*api.Error); ok {
errStatus, decodeErr := apiErr.Code.Int64()
if decodeErr == nil && errStatus == status {
return true
}
}
return false
}
// createHiDriveScopes creates oauth-scopes
// from the given user-role and access-permissions.
//
// If the arguments are empty, they will not be included in the result.
func createHiDriveScopes(role string, access string) []string {
switch {
case role != "" && access != "":
return []string{access + "," + role}
case role != "":
return []string{role}
case access != "":
return []string{access}
}
return []string{}
}
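// For example, as can be read off the switch above:
//
//	createHiDriveScopes("user", "rw") // -> []string{"rw,user"}
//	createHiDriveScopes("user", "")   // -> []string{"user"}
//	createHiDriveScopes("", "rw")     // -> []string{"rw"}
//	createHiDriveScopes("", "")       // -> []string{}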
// cachedReader returns a version of the reader that caches its contents and
// can therefore be reset using Seek.
func cachedReader(reader io.Reader) io.ReadSeeker {
bytesReader, ok := reader.(*bytes.Reader)
if ok {
return bytesReader
}
repeatableReader, ok := reader.(*readers.RepeatableReader)
if ok {
return repeatableReader
}
return readers.NewRepeatableReader(reader)
}
// readerForChunk reads a chunk of bytes from reader (after handling any accounting).
// Returns a new io.Reader (chunkReader) for that chunk
// and the number of bytes that have been read from reader.
func readerForChunk(reader io.Reader, length int) (chunkReader io.Reader, bytesRead int, err error) {
// Unwrap any accounting from the input if present.
reader, wrap := accounting.UnWrap(reader)
// Read a chunk of data.
buffer := make([]byte, length)
bytesRead, err = io.ReadFull(reader, buffer)
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = nil
}
if err != nil {
return nil, bytesRead, err
}
// Truncate unused capacity.
buffer = buffer[:bytesRead]
// Use wrap to put any accounting back for chunkReader.
return wrap(bytes.NewReader(buffer)), bytesRead, nil
}
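// Illustrative usage (the chunk size of 4096 is only an example):
//
//	chunkReader, n, err := readerForChunk(src, 4096)
//	// n == 4096 for a full chunk; a shorter (or zero) n indicates the final chunk,
//	// and chunkReader yields exactly those n bytes again.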

backend/hidrive/hidrive.go (new file, 1002 lines; diff suppressed because it is too large)

@@ -0,0 +1,45 @@
// Test HiDrive filesystem interface
package hidrive
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote.
func TestIntegration(t *testing.T) {
name := "TestHiDrive"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: 1,
MaxChunkSize: MaximumUploadBytes,
CeilChunkSize: nil,
NeedMultipleChunks: false,
},
})
}
// Change the configured UploadChunkSize.
// Will only be called while no transfer is in progress.
func (f *Fs) SetUploadChunkSize(chunksize fs.SizeSuffix) (fs.SizeSuffix, error) {
var old fs.SizeSuffix
old, f.opt.UploadChunkSize = f.opt.UploadChunkSize, chunksize
return old, nil
}
// Change the configured UploadCutoff.
// Will only be called while no transfer is in progress.
func (f *Fs) SetUploadCutoff(cutoff fs.SizeSuffix) (fs.SizeSuffix, error) {
var old fs.SizeSuffix
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cutoff
return old, nil
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)


@@ -0,0 +1,410 @@
// Package hidrivehash implements the HiDrive hashing algorithm which combines SHA-1 hashes hierarchically to a single top-level hash.
//
// Note: This implementation does not grant access to any partial hashes generated.
//
// See: https://developer.hidrive.com/wp-content/uploads/2021/07/HiDrive_Synchronization-v3.3-rev28.pdf
// (link to newest version: https://static.hidrive.com/dev/0001)
package hidrivehash
import (
"bytes"
"crypto/sha1"
"encoding"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
"github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
)
const (
// BlockSize of the checksum in bytes.
BlockSize = 4096
// Size of the checksum in bytes.
Size = sha1.Size
// sumsPerLevel is the maximum number of checksums a single level can hold before it is full.
sumsPerLevel = 256
)
var (
// zeroSum is a special hash consisting of 20 null-bytes.
// This will be the hash of any empty file (or ones containing only null-bytes).
zeroSum = [Size]byte{}
// ErrorInvalidEncoding is returned when a hash should be decoded from a binary form that is invalid.
ErrorInvalidEncoding = errors.New("encoded binary form is invalid for this hash")
// ErrorHashFull is returned when a hash reached its capacity and cannot accept any more input.
ErrorHashFull = errors.New("hash reached its capacity")
)
// writeByBlock writes len(p) bytes from p to the io.Writer in blocks of size blockSize.
// It returns the number of bytes written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
//
// A pointer bytesInBlock to a counter needs to be supplied;
// it is used to keep track of how many bytes have already been written to the current block.
// A pointer onlyNullBytesInBlock to a boolean needs to be supplied;
// it is used to keep track of whether the current block so far consists only of null-bytes.
// The callback onBlockWritten is called whenever a full block has been written to the writer
// and is given as input the number of bytes that still need to be written.
func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *uint32, onlyNullBytesInBlock *bool, onBlockWritten func(remaining int) error) (n int, err error) {
total := len(p)
nullBytes := make([]byte, blockSize)
for len(p) > 0 {
toWrite := int(blockSize - *bytesInBlock)
if toWrite > len(p) {
toWrite = len(p)
}
c, err := writer.Write(p[:toWrite])
*bytesInBlock += uint32(c)
*onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite])
// Discard data written through a reslice
p = p[c:]
if err != nil {
return total - len(p), err
}
if *bytesInBlock == blockSize {
err = onBlockWritten(len(p))
if err != nil {
return total - len(p), err
}
*bytesInBlock = 0
*onlyNullBytesInBlock = true
}
}
return total, nil
}
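// Worked example (numbers are illustrative): with blockSize 4096 and an empty
// current block, a single 10000-byte write fills two full blocks, calling
// onBlockWritten with remaining 5904 and then 1808, and leaves the last
// 1808 bytes in the current block (bytesInBlock == 1808).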
// level is a hash.Hash that is used to aggregate the checksums produced by the level hierarchically beneath it.
// It is used to represent any level-n hash, except for level-0.
type level struct {
checksum [Size]byte // aggregated checksum of this level
sumCount uint32 // number of sums contained in this level so far
bytesInHasher uint32 // number of bytes written into hasher so far
onlyNullBytesInHasher bool // whether the hasher only contains null-bytes so far
hasher hash.Hash
}
// NewLevel returns a new hash.Hash computing any level-n hash, except level-0.
func NewLevel() hash.Hash {
l := &level{}
l.Reset()
return l
}
// Add takes a position-embedded SHA-1 checksum and adds it to the level.
func (l *level) Add(sha1sum []byte) {
var tmp uint
var carry bool
for i := Size - 1; i >= 0; i-- {
tmp = uint(sha1sum[i]) + uint(l.checksum[i])
if carry {
tmp++
}
carry = tmp > 255
l.checksum[i] = byte(tmp)
}
}
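// In other words (illustrative): Add treats both 20-byte checksums as 160-bit
// big-endian integers and stores their sum modulo 2^160 in l.checksum.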
// IsFull returns whether the number of checksums added to this level reached its capacity.
func (l *level) IsFull() bool {
return l.sumCount >= sumsPerLevel
}
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// Contrary to the specification from hash.Hash, this DOES return an error,
// specifically ErrorHashFull if and only if IsFull() returns true.
func (l *level) Write(p []byte) (n int, err error) {
if l.IsFull() {
return 0, ErrorHashFull
}
onBlockWritten := func(remaining int) error {
if !l.onlyNullBytesInHasher {
c, err := l.hasher.Write([]byte{byte(l.sumCount)})
l.bytesInHasher += uint32(c)
if err != nil {
return err
}
l.Add(l.hasher.Sum(nil))
}
l.sumCount++
l.hasher.Reset()
if remaining > 0 && l.IsFull() {
return ErrorHashFull
}
return nil
}
return writeByBlock(p, l.hasher, uint32(l.BlockSize()), &l.bytesInHasher, &l.onlyNullBytesInHasher, onBlockWritten)
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (l *level) Sum(b []byte) []byte {
return append(b, l.checksum[:]...)
}
// Reset resets the Hash to its initial state.
func (l *level) Reset() {
l.checksum = zeroSum // clear the current checksum
l.sumCount = 0
l.bytesInHasher = 0
l.onlyNullBytesInHasher = true
l.hasher = sha1.New()
}
// Size returns the number of bytes Sum will return.
func (l *level) Size() int {
return Size
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (l *level) BlockSize() int {
return Size
}
// MarshalBinary encodes the hash into a binary form and returns the result.
func (l *level) MarshalBinary() ([]byte, error) {
b := make([]byte, Size+4+4+1)
copy(b, l.checksum[:])
binary.BigEndian.PutUint32(b[Size:], l.sumCount)
binary.BigEndian.PutUint32(b[Size+4:], l.bytesInHasher)
if l.onlyNullBytesInHasher {
b[Size+4+4] = 1
}
encodedHasher, err := l.hasher.(encoding.BinaryMarshaler).MarshalBinary()
if err != nil {
return nil, err
}
b = append(b, encodedHasher...)
return b, nil
}
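// Layout of the encoding produced above (offsets in bytes):
//
//	[0:20)  checksum
//	[20:24) sumCount (big-endian)
//	[24:28) bytesInHasher (big-endian)
//	[28]    onlyNullBytesInHasher flag (1 if true, else 0)
//	[29:]   marshalled internal state of the underlying SHA-1 hasher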
// UnmarshalBinary decodes the binary form generated by MarshalBinary.
// The hash will replace its internal state accordingly.
func (l *level) UnmarshalBinary(b []byte) error {
if len(b) < Size+4+4+1 {
return ErrorInvalidEncoding
}
copy(l.checksum[:], b)
l.sumCount = binary.BigEndian.Uint32(b[Size:])
l.bytesInHasher = binary.BigEndian.Uint32(b[Size+4:])
switch b[Size+4+4] {
case 0:
l.onlyNullBytesInHasher = false
case 1:
l.onlyNullBytesInHasher = true
default:
return ErrorInvalidEncoding
}
err := l.hasher.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[Size+4+4+1:])
return err
}
// hidriveHash is the hash computing the actual checksum used by HiDrive by combining multiple level-hashes.
type hidriveHash struct {
levels []*level // collection of level-hashes, one for each level starting at level-1
lastSumWritten [Size]byte // the last checksum written to any of the levels
bytesInBlock uint32 // bytes written into blockHash so far
onlyNullBytesInBlock bool // whether the current block only contains null-bytes so far
blockHash hash.Hash
}
// New returns a new hash.Hash computing the HiDrive checksum.
func New() hash.Hash {
h := &hidriveHash{}
h.Reset()
return h
}
// aggregateToLevel writes the checksum to the level at the given index
// and if necessary propagates any changes to levels above.
func (h *hidriveHash) aggregateToLevel(index int, sum []byte) {
for i := index; ; i++ {
if i >= len(h.levels) {
h.levels = append(h.levels, NewLevel().(*level))
}
_, err := h.levels[i].Write(sum)
copy(h.lastSumWritten[:], sum)
if err != nil {
panic(fmt.Errorf("level-hash should not have produced an error: %w", err))
}
if !h.levels[i].IsFull() {
break
}
sum = h.levels[i].Sum(nil)
h.levels[i].Reset()
}
}
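// Illustrative capacities (BlockSize 4096, sumsPerLevel 256): levels[0]
// aggregates up to 256 block hashes, i.e. 1 MiB of data; each further level
// multiplies this by 256, so levels[1] covers up to 256 MiB and levels[2] up
// to 64 GiB before its sum is propagated upwards.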
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
func (h *hidriveHash) Write(p []byte) (n int, err error) {
onBlockWritten := func(remaining int) error {
var sum []byte
if h.onlyNullBytesInBlock {
sum = zeroSum[:]
} else {
sum = h.blockHash.Sum(nil)
}
h.blockHash.Reset()
h.aggregateToLevel(0, sum)
return nil
}
return writeByBlock(p, h.blockHash, uint32(BlockSize), &h.bytesInBlock, &h.onlyNullBytesInBlock, onBlockWritten)
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (h *hidriveHash) Sum(b []byte) []byte {
// Save internal state.
state, err := h.MarshalBinary()
if err != nil {
panic(fmt.Errorf("saving the internal state should not have produced an error: %w", err))
}
if h.bytesInBlock > 0 {
// Fill remainder of block with null-bytes.
filler := make([]byte, h.BlockSize()-int(h.bytesInBlock))
_, err = h.Write(filler)
if err != nil {
panic(fmt.Errorf("filling with null-bytes should not have an error: %w", err))
}
}
checksum := zeroSum
for i := 0; i < len(h.levels); i++ {
level := h.levels[i]
if i < len(h.levels)-1 {
// Aggregate non-empty non-final levels.
if level.sumCount >= 1 {
h.aggregateToLevel(i+1, level.Sum(nil))
level.Reset()
}
} else {
// Determine sum of final level.
if level.sumCount > 1 {
copy(checksum[:], level.Sum(nil))
} else {
// This is needed, otherwise there is no way to return
// the non-position-embedded checksum.
checksum = h.lastSumWritten
}
}
}
// Restore internal state.
err = h.UnmarshalBinary(state)
if err != nil {
panic(fmt.Errorf("restoring the internal state should not have produced an error: %w", err))
}
return append(b, checksum[:]...)
}
// Reset resets the Hash to its initial state.
func (h *hidriveHash) Reset() {
h.levels = nil
h.lastSumWritten = zeroSum // clear the last written checksum
h.bytesInBlock = 0
h.onlyNullBytesInBlock = true
h.blockHash = sha1.New()
}
// Size returns the number of bytes Sum will return.
func (h *hidriveHash) Size() int {
return Size
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (h *hidriveHash) BlockSize() int {
return BlockSize
}
// MarshalBinary encodes the hash into a binary form and returns the result.
func (h *hidriveHash) MarshalBinary() ([]byte, error) {
b := make([]byte, Size+4+1+8)
copy(b, h.lastSumWritten[:])
binary.BigEndian.PutUint32(b[Size:], h.bytesInBlock)
if h.onlyNullBytesInBlock {
b[Size+4] = 1
}
binary.BigEndian.PutUint64(b[Size+4+1:], uint64(len(h.levels)))
for _, level := range h.levels {
encodedLevel, err := level.MarshalBinary()
if err != nil {
return nil, err
}
encodedLength := make([]byte, 8)
binary.BigEndian.PutUint64(encodedLength, uint64(len(encodedLevel)))
b = append(b, encodedLength...)
b = append(b, encodedLevel...)
}
encodedBlockHash, err := h.blockHash.(encoding.BinaryMarshaler).MarshalBinary()
if err != nil {
return nil, err
}
b = append(b, encodedBlockHash...)
return b, nil
}
// UnmarshalBinary decodes the binary form generated by MarshalBinary.
// The hash will replace its internal state accordingly.
func (h *hidriveHash) UnmarshalBinary(b []byte) error {
if len(b) < Size+4+1+8 {
return ErrorInvalidEncoding
}
copy(h.lastSumWritten[:], b)
h.bytesInBlock = binary.BigEndian.Uint32(b[Size:])
switch b[Size+4] {
case 0:
h.onlyNullBytesInBlock = false
case 1:
h.onlyNullBytesInBlock = true
default:
return ErrorInvalidEncoding
}
amount := binary.BigEndian.Uint64(b[Size+4+1:])
h.levels = make([]*level, int(amount))
offset := Size + 4 + 1 + 8
for i := range h.levels {
length := int(binary.BigEndian.Uint64(b[offset:]))
offset += 8
h.levels[i] = NewLevel().(*level)
err := h.levels[i].UnmarshalBinary(b[offset : offset+length])
if err != nil {
return err
}
offset += length
}
err := h.blockHash.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[offset:])
return err
}
// Sum returns the HiDrive checksum of the data.
func Sum(data []byte) [Size]byte {
h := New().(*hidriveHash)
_, _ = h.Write(data)
var result [Size]byte
copy(result[:], h.Sum(nil))
return result
}
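// Illustrative usage:
//
//	sum := Sum([]byte("hello rclone\n"))
//	fmt.Printf("%x\n", sum)
//
// An empty input yields the all-zero checksum (see zeroSum above),
// so Sum(nil) equals [Size]byte{}.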
// Check the interfaces are satisfied.
var (
_ hash.Hash = (*level)(nil)
_ encoding.BinaryMarshaler = (*level)(nil)
_ encoding.BinaryUnmarshaler = (*level)(nil)
_ internal.LevelHash = (*level)(nil)
_ hash.Hash = (*hidriveHash)(nil)
_ encoding.BinaryMarshaler = (*hidriveHash)(nil)
_ encoding.BinaryUnmarshaler = (*hidriveHash)(nil)
)


@@ -0,0 +1,395 @@
package hidrivehash_test
import (
"crypto/sha1"
"encoding"
"encoding/hex"
"fmt"
"io"
"testing"
"github.com/rclone/rclone/backend/hidrive/hidrivehash"
"github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
"github.com/stretchr/testify/assert"
)
// helper functions to set up test-tables
func sha1ArrayAsSlice(sum [sha1.Size]byte) []byte {
return sum[:]
}
func mustDecode(hexstring string) []byte {
result, err := hex.DecodeString(hexstring)
if err != nil {
panic(err)
}
return result
}
// ------------------------------------------------------------
var testTableLevelPositionEmbedded = []struct {
ins [][]byte
outs [][]byte
name string
}{
{
[][]byte{
sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}),
sha1ArrayAsSlice([20]byte{78, 188, 156, 219, 173, 54, 81, 55, 47, 220, 222, 207, 201, 21, 57, 252, 255, 239, 251, 186}),
},
[][]byte{
sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}),
sha1ArrayAsSlice([20]byte{68, 135, 96, 187, 38, 253, 14, 167, 186, 167, 188, 210, 91, 177, 185, 13, 208, 217, 94, 18}),
},
"documentation-v3.2rev27-example L0 (position-embedded)",
},
{
[][]byte{
sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}),
sha1ArrayAsSlice([20]byte{75, 211, 153, 190, 125, 179, 67, 49, 60, 149, 98, 246, 142, 20, 11, 254, 159, 162, 129, 237}),
sha1ArrayAsSlice([20]byte{150, 2, 9, 153, 97, 153, 189, 104, 147, 14, 77, 203, 244, 243, 25, 212, 67, 48, 111, 107}),
},
[][]byte{
sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}),
sha1ArrayAsSlice([20]byte{144, 209, 246, 100, 177, 216, 171, 229, 83, 17, 92, 135, 68, 98, 76, 72, 217, 24, 99, 176}),
sha1ArrayAsSlice([20]byte{38, 211, 255, 254, 19, 114, 105, 77, 230, 31, 170, 83, 57, 85, 102, 29, 28, 72, 211, 27}),
},
"documentation-example L0 (position-embedded)",
},
{
[][]byte{
sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}),
sha1ArrayAsSlice([20]byte{40, 34, 8, 238, 37, 5, 237, 184, 79, 105, 10, 167, 171, 254, 13, 229, 132, 112, 254, 8}),
sha1ArrayAsSlice([20]byte{39, 112, 26, 86, 190, 35, 100, 101, 28, 131, 122, 191, 254, 144, 239, 107, 253, 124, 104, 203}),
},
[][]byte{
sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}),
sha1ArrayAsSlice([20]byte{213, 157, 141, 227, 213, 178, 25, 111, 200, 145, 77, 164, 17, 247, 202, 167, 37, 46, 0, 124}),
sha1ArrayAsSlice([20]byte{253, 13, 168, 58, 147, 213, 125, 212, 229, 20, 200, 100, 16, 136, 186, 19, 34, 170, 105, 71}),
},
"documentation-example L1 (position-embedded)",
},
}
var testTableLevel = []struct {
ins [][]byte
outs [][]byte
name string
}{
{
[][]byte{
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
},
[][]byte{
mustDecode("44fe5ca6342568b4167bf990b64e404a3975e1c3"),
mustDecode("90d1f664b1d8abe553115c8744624c48d91863b0"),
mustDecode("26d3fffe1372694de61faa533955661d1c48d31b"),
},
"documentation-example L0",
},
{
[][]byte{
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"),
mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"),
},
[][]byte{
mustDecode("ad7b84f5b0ac2bb7792842fc65f9bcc1a0bd0274"),
mustDecode("d59d8de3d5b2196fc8914da411f7caa7252e007c"),
mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"),
},
"documentation-example L1",
},
{
[][]byte{
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"),
mustDecode("0000000000000000000000000000000000000000"),
},
[][]byte{
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"),
mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"),
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"),
mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"),
},
"mixed-with-empties",
},
}
var testTable = []struct {
data []byte
// pattern describes how to use data to construct the hash-input.
// For every entry n at even indices this repeats the data n times.
// For every entry m at odd indices this repeats a null-byte m times.
// The input-data is constructed by concatenating the results in order.
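// For example (illustrative): data "ab" with pattern {2, 3, 1} describes the
// input "abab", followed by three null-bytes, followed by "ab".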
pattern []int64
out []byte
name string
}{
{
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
[]int64{64},
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
"documentation-example L0",
},
{
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
[]int64{64 * 256},
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
"documentation-example L1",
},
{
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
[]int64{64 * 256, 0, 64 * 128, 4096 * 128, 64*2 + 32},
mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"),
"documentation-example L2",
},
{
[]byte("hello rclone\n"),
[]int64{316},
mustDecode("72370f9c18a2c20b31d71f3f4cee7a3cd2703737"),
"not-block-aligned",
},
{
[]byte("hello rclone\n"),
[]int64{13, 4096 * 3, 4},
mustDecode("a6990b81791f0d2db750b38f046df321c975aa60"),
"not-block-aligned-with-null-bytes",
},
{
[]byte{},
[]int64{},
mustDecode("0000000000000000000000000000000000000000"),
"empty",
},
{
[]byte{},
[]int64{0, 4096 * 256 * 256},
mustDecode("0000000000000000000000000000000000000000"),
"null-bytes",
},
}
// ------------------------------------------------------------
func TestLevelAdd(t *testing.T) {
for _, test := range testTableLevelPositionEmbedded {
l := hidrivehash.NewLevel().(internal.LevelHash)
t.Run(test.name, func(t *testing.T) {
for i := range test.ins {
l.Add(test.ins[i])
assert.Equal(t, test.outs[i], l.Sum(nil))
}
})
}
}
func TestLevelWrite(t *testing.T) {
for _, test := range testTableLevel {
l := hidrivehash.NewLevel()
t.Run(test.name, func(t *testing.T) {
for i := range test.ins {
l.Write(test.ins[i])
assert.Equal(t, test.outs[i], l.Sum(nil))
}
})
}
}
func TestLevelIsFull(t *testing.T) {
content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
l := hidrivehash.NewLevel()
for i := 0; i < 256; i++ {
assert.False(t, l.(internal.LevelHash).IsFull())
written, err := l.Write(content[:])
assert.Equal(t, len(content), written)
if !assert.NoError(t, err) {
t.FailNow()
}
}
assert.True(t, l.(internal.LevelHash).IsFull())
written, err := l.Write(content[:])
assert.True(t, l.(internal.LevelHash).IsFull())
assert.Equal(t, 0, written)
assert.ErrorIs(t, err, hidrivehash.ErrorHashFull)
}
func TestLevelReset(t *testing.T) {
l := hidrivehash.NewLevel()
zeroHash := l.Sum(nil)
_, err := l.Write([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19})
if assert.NoError(t, err) {
assert.NotEqual(t, zeroHash, l.Sum(nil))
l.Reset()
assert.Equal(t, zeroHash, l.Sum(nil))
}
}
func TestLevelSize(t *testing.T) {
l := hidrivehash.NewLevel()
assert.Equal(t, 20, l.Size())
}
func TestLevelBlockSize(t *testing.T) {
l := hidrivehash.NewLevel()
assert.Equal(t, 20, l.BlockSize())
}
func TestLevelBinaryMarshaler(t *testing.T) {
content := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
l := hidrivehash.NewLevel().(internal.LevelHash)
l.Write(content[:10])
encoded, err := l.MarshalBinary()
if assert.NoError(t, err) {
d := hidrivehash.NewLevel().(internal.LevelHash)
err = d.UnmarshalBinary(encoded)
if assert.NoError(t, err) {
assert.Equal(t, l.Sum(nil), d.Sum(nil))
l.Write(content[10:])
d.Write(content[10:])
assert.Equal(t, l.Sum(nil), d.Sum(nil))
}
}
}
func TestLevelInvalidEncoding(t *testing.T) {
l := hidrivehash.NewLevel().(internal.LevelHash)
err := l.UnmarshalBinary([]byte{})
assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding)
}
// ------------------------------------------------------------
type infiniteReader struct {
source []byte
offset int
}
func (m *infiniteReader) Read(b []byte) (int, error) {
count := copy(b, m.source[m.offset:])
m.offset += count
m.offset %= len(m.source)
return count, nil
}
func writeInChunks(writer io.Writer, chunkSize int64, data []byte, pattern []int64) error {
readers := make([]io.Reader, len(pattern))
nullBytes := [4096]byte{}
for i, n := range pattern {
if i%2 == 0 {
readers[i] = io.LimitReader(&infiniteReader{data, 0}, n*int64(len(data)))
} else {
readers[i] = io.LimitReader(&infiniteReader{nullBytes[:], 0}, n)
}
}
reader := io.MultiReader(readers...)
for {
_, err := io.CopyN(writer, reader, chunkSize)
if err != nil {
if err == io.EOF {
err = nil
}
return err
}
}
}
func TestWrite(t *testing.T) {
for _, test := range testTable {
t.Run(test.name, func(t *testing.T) {
h := hidrivehash.New()
err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern)
if assert.NoError(t, err) {
normalSum := h.Sum(nil)
assert.Equal(t, test.out, normalSum)
// Test if different block-sizes produce differing results.
for _, blockSize := range []int64{397, 512, 4091, 8192, 10000} {
t.Run(fmt.Sprintf("block-size %v", blockSize), func(t *testing.T) {
h := hidrivehash.New()
err := writeInChunks(h, blockSize, test.data, test.pattern)
if assert.NoError(t, err) {
assert.Equal(t, normalSum, h.Sum(nil))
}
})
}
}
})
}
}
func TestReset(t *testing.T) {
h := hidrivehash.New()
zeroHash := h.Sum(nil)
_, err := h.Write([]byte{1})
if assert.NoError(t, err) {
assert.NotEqual(t, zeroHash, h.Sum(nil))
h.Reset()
assert.Equal(t, zeroHash, h.Sum(nil))
}
}
func TestSize(t *testing.T) {
h := hidrivehash.New()
assert.Equal(t, 20, h.Size())
}
func TestBlockSize(t *testing.T) {
h := hidrivehash.New()
assert.Equal(t, 4096, h.BlockSize())
}
func TestBinaryMarshaler(t *testing.T) {
for _, test := range testTable {
h := hidrivehash.New()
d := hidrivehash.New()
half := len(test.pattern) / 2
t.Run(test.name, func(t *testing.T) {
err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[:half])
assert.NoError(t, err)
encoded, err := h.(encoding.BinaryMarshaler).MarshalBinary()
if assert.NoError(t, err) {
err = d.(encoding.BinaryUnmarshaler).UnmarshalBinary(encoded)
if assert.NoError(t, err) {
assert.Equal(t, h.Sum(nil), d.Sum(nil))
err = writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[half:])
assert.NoError(t, err)
err = writeInChunks(d, int64(d.BlockSize()), test.data, test.pattern[half:])
assert.NoError(t, err)
assert.Equal(t, h.Sum(nil), d.Sum(nil))
}
}
})
}
}
func TestInvalidEncoding(t *testing.T) {
h := hidrivehash.New()
err := h.(encoding.BinaryUnmarshaler).UnmarshalBinary([]byte{})
assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding)
}
func TestSum(t *testing.T) {
assert.Equal(t, [hidrivehash.Size]byte{}, hidrivehash.Sum([]byte{}))
content := []byte{1}
h := hidrivehash.New()
h.Write(content)
sum := hidrivehash.Sum(content)
assert.Equal(t, h.Sum(nil), sum[:])
}


@@ -0,0 +1,18 @@
// Package internal provides utilities for HiDrive.
package internal
import (
"encoding"
"hash"
)
// LevelHash is an internal interface for level-hashes.
type LevelHash interface {
encoding.BinaryMarshaler
encoding.BinaryUnmarshaler
hash.Hash
// Add takes a position-embedded checksum and adds it to the level.
Add(sum []byte)
// IsFull returns whether the number of checksums added to this level reached its capacity.
IsFull() bool
}


@@ -6,17 +6,17 @@ package http
import (
"context"
"errors"
"fmt"
"io"
"mime"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -34,37 +34,29 @@ var (
func init() {
fsi := &fs.RegInfo{
Name: "http",
Description: "http Connection",
Description: "HTTP",
NewFs: NewFs,
Options: []fs.Option{{
Name: "url",
Help: "URL of http host to connect to",
Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
Required: true,
Examples: []fs.OptionExample{{
Value: "https://example.com",
Help: "Connect to example.com",
}, {
Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password",
}},
}, {
Name: "headers",
Help: `Set HTTP headers for all transactions
Help: `Set HTTP headers for all transactions.
Use this to set additional HTTP headers for all transactions
Use this to set additional HTTP headers for all transactions.
The input format is comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.`,
Default: fs.CommaSepList{},
Advanced: true,
}, {
Name: "no_slash",
Help: `Set this if the site doesn't end directories with /
Help: `Set this if the site doesn't end directories with /.
Use this if your target website does not use / on the end of
directories.
@@ -80,8 +72,9 @@ directories.`,
Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests to find file sizes in dir listing
Help: `Don't use HEAD requests.
HEAD requests are mainly used to find file sizes in dir listing.
If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:
@@ -90,12 +83,9 @@ directory listing to:
- check it really exists
- check to see if it is a directory
If you set this option, rclone will not do the HEAD request. This will mean
- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
If you set this option, rclone will not do the HEAD request. This will mean
that directory listings are much quicker, but rclone won't have the times or
sizes of any files, and some files that don't exist may be in the listing.`,
Default: false,
Advanced: true,
}},
@@ -139,11 +129,87 @@ func statusError(res *http.Response, err error) error {
}
if res.StatusCode < 200 || res.StatusCode > 299 {
_ = res.Body.Close()
return errors.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
return fmt.Errorf("HTTP Error: %s", res.Status)
}
return nil
}
// getFsEndpoint decides if url is to be considered a file or directory,
// and returns a proper endpoint url to use for the fs.
func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Options) (string, bool) {
// If url ends with '/' it is already a proper url always assumed to be a directory.
if url[len(url)-1] == '/' {
return url, false
}
// If url does not end with '/' we send a HEAD request to decide
// if it is directory or file, and if directory appends the missing
// '/', or if file returns the directory url to parent instead.
createFileResult := func() (string, bool) {
fs.Debugf(nil, "If path is a directory you must add a trailing '/'")
parent, _ := path.Split(url)
return parent, true
}
createDirResult := func() (string, bool) {
fs.Debugf(nil, "To avoid the initial HEAD request add a trailing '/' to the path")
return url + "/", false
}
// If HEAD requests are not allowed we just have to assume it is a file.
if opt.NoHead {
fs.Debugf(nil, "Assuming path is a file as --http-no-head is set")
return createFileResult()
}
// Use a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be created: %v", err)
return createFileResult()
}
addHeaders(req, opt)
res, err := noRedir.Do(req)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
return createFileResult()
}
if res.StatusCode == http.StatusNotFound {
fs.Debugf(nil, "Assuming path is a directory as HEAD response is it does not exist as a file (%s)", res.Status)
return createDirResult()
}
if res.StatusCode == http.StatusMovedPermanently ||
res.StatusCode == http.StatusFound ||
res.StatusCode == http.StatusSeeOther ||
res.StatusCode == http.StatusTemporaryRedirect ||
res.StatusCode == http.StatusPermanentRedirect {
redir := res.Header.Get("Location")
if redir != "" {
if redir[len(redir)-1] == '/' {
fs.Debugf(nil, "Assuming path is a directory as HEAD response is redirect (%s) to a path that ends with '/': %s", res.Status, redir)
return createDirResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) to a path that does not end with '/': %s", res.Status, redir)
return createFileResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) but no location header", res.Status)
return createFileResult()
}
if res.StatusCode < 200 || res.StatusCode > 299 {
// Example is 403 (http.StatusForbidden) for servers not allowing HEAD requests.
fs.Debugf(nil, "Assuming path is a file as HEAD response is an error (%s)", res.Status)
return createFileResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is success (%s)", res.Status)
return createFileResult()
}
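// Illustrative outcomes (URLs are examples only):
//
//	"https://host/dir/"                              -> ("https://host/dir/", false) // trailing '/' means directory, no HEAD sent
//	"https://host/file.bin", HEAD answers 200        -> ("https://host/", true)      // a file; the fs points at the parent
//	"https://host/dir", HEAD redirects to ".../dir/" -> ("https://host/dir/", false) // redirect to '/' means directory
//	any URL with --http-no-head set                  -> treated as a file without sending a HEAD request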
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -174,37 +240,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
client := fshttp.NewClient(ctx)
var isFile = false
if !strings.HasSuffix(u.String(), "/") {
// Make a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
// check to see if points to a file
req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
if err == nil {
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
if err == nil {
isFile = true
}
}
}
newRoot := u.String()
if isFile {
// Point to the parent if this is a file
newRoot, _ = path.Split(u.String())
} else {
if !strings.HasSuffix(newRoot, "/") {
newRoot += "/"
}
}
u, err = url.Parse(newRoot)
endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
fs.Debugf(nil, "Root: %s", endpoint)
u, err = url.Parse(endpoint)
if err != nil {
return nil, err
}
@@ -222,12 +260,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
if isFile {
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
if !strings.HasSuffix(f.endpointURL, "/") {
return nil, errors.New("internal error: url doesn't end with /")
}
return f, nil
}
@@ -262,7 +304,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f,
remote: remote,
}
err := o.stat(ctx)
err := o.head(ctx)
if err != nil {
return nil, err
}
@@ -274,15 +316,6 @@ func (f *Fs) url(remote string) string {
return f.endpointURL + rest.URLPathEscape(remote)
}
// parse s into an int64, on failure return def
func parseInt64(s string, def int64) int64 {
n, e := strconv.ParseInt(s, 10, 64)
if e != nil {
return def
}
return n
}
// Errors returned by parseName
var (
errURLJoinFailed = errors.New("URLJoin failed")
@@ -303,7 +336,7 @@ func parseName(base *url.URL, name string) (string, error) {
}
// check it doesn't have URL parameters
uStr := u.String()
if strings.Index(uStr, "?") >= 0 {
if strings.Contains(uStr, "?") {
return "", errFoundQuestionMark
}
// check that this is going back to the same host and scheme
@@ -384,15 +417,15 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
URL := f.url(dir)
u, err := url.Parse(URL)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
return nil, fmt.Errorf("failed to readDir: %w", err)
}
if !strings.HasSuffix(URL, "/") {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
return nil, fmt.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
// Do the request
req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
return nil, fmt.Errorf("readDir failed: %w", err)
}
f.addHeaders(req)
res, err := f.httpClient.Do(req)
@@ -404,7 +437,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
}
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
return nil, fmt.Errorf("failed to readDir: %w", err)
}
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
@@ -412,10 +445,10 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
case "text/html":
names, err = parse(u, res.Body)
if err != nil {
return nil, errors.Wrap(err, "readDir")
return nil, fmt.Errorf("readDir: %w", err)
}
default:
return nil, errors.Errorf("Can't parse content type %q", contentType)
return nil, fmt.Errorf("can't parse content type %q", contentType)
}
return names, nil
}
@@ -435,7 +468,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
names, err := f.readDir(ctx, dir)
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
return nil, fmt.Errorf("error listing %q: %w", dir, err)
}
var (
entriesMu sync.Mutex // to protect entries
@@ -457,7 +490,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs: f,
remote: remote,
}
switch err := file.stat(ctx); err {
switch err := file.head(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
@@ -536,8 +569,8 @@ func (o *Object) url() string {
return o.fs.url(o.remote)
}
// stat updates the info field in the Object
func (o *Object) stat(ctx context.Context) error {
// head sends a HEAD request to update info fields in the Object
func (o *Object) head(ctx context.Context) error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
@@ -547,7 +580,7 @@ func (o *Object) stat(ctx context.Context) error {
url := o.url()
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
return fmt.Errorf("stat failed: %w", err)
}
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
@@ -556,20 +589,26 @@ func (o *Object) stat(ctx context.Context) error {
}
err = statusError(res, err)
if err != nil {
return errors.Wrap(err, "failed to stat")
return fmt.Errorf("failed to stat: %w", err)
}
return o.decodeMetadata(ctx, res)
}
// decodeMetadata updates info fields in the Object according to HTTP response headers
func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
t = timeUnset
}
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
o.size = rest.ParseSizeFromHeaders(res.Header)
// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
if err != nil {
return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
return fmt.Errorf("failed to parse Content-Type: %q: %w", o.contentType, err)
}
if mediaType == "text/html" {
return fs.ErrorNotAFile
@@ -595,7 +634,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
url := o.url()
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
return nil, fmt.Errorf("Open failed: %w", err)
}
// Add optional headers
@@ -608,7 +647,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
res, err := o.fs.httpClient.Do(req)
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
return nil, fmt.Errorf("Open failed: %w", err)
}
if err = o.decodeMetadata(ctx, res); err != nil {
return nil, fmt.Errorf("decodeMetadata failed: %w", err)
}
return res.Body, nil
}

Some files were not shown because too many files have changed in this diff.