mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits


704 Commits

Author SHA1 Message Date
Nick Craig-Wood
77c7077458 Version v1.62.0 2023-03-14 12:42:23 +00:00
Nick Craig-Wood
ffd4ab222c docs: add idrive e2 as a major sponsor 2023-03-14 12:37:34 +00:00
Nick Craig-Wood
676277e255 docs: move FUSE-T docs from auto generated file to source file
Docs committed in wrong place in

c0a5283416 docs: rclone mount on macOS with macFUSE and FUSE-T
2023-03-14 12:37:34 +00:00
Justin Winokur
c0a5283416 docs: rclone mount on macOS with macFUSE and FUSE-T 2023-03-13 10:55:39 +00:00
Nick Craig-Wood
e405ca7733 vfs: make uploaded files retain modtime with non-modtime backends
Before this change if a file was uploaded to a backend which didn't
support modtimes, the time of the file read after the upload had
completed would change to the time the file was uploaded on the
backend.

When using `--vfs-cache-mode writes` or `full` this time would be
different by the `--vfs-write-back` delay which would cause
applications to think the file had been modified.

This change uses the last modification time read by the OS as a
virtual modtime for backends which don't support setting modtimes. It
does not change the modtime to the time the file was actually uploaded.

This means that as long as the file remains in the directory cache it
will have the expected modtime.

See: https://forum.rclone.org/t/saving-files-causes-wrong-modified-time-to-be-set-for-a-few-seconds-on-webdav-mount-with-bitrix24/36451
2023-03-10 15:00:01 +00:00
Nick Craig-Wood
580d72f0f6 operations: skip --max-delete tests on chunker integration tests
The recent changes to remove race conditions from --max-delete have
made these tests fail on chunker with s3 because they do a copy then a
delete, and the deletes are being counted in the --max-delete(-size)
counts.
2023-03-10 12:13:44 +00:00
Nick Craig-Wood
22daeaa6f3 build: update dependencies
This fixes the azureblob backend so it builds again after the SDK
changes.

This doesn't update bazil.org/fuse because it doesn't build on FreeBSD

https://github.com/bazil/fuse/issues/295
2023-03-10 11:15:07 +00:00
Nick Craig-Wood
ca9ad7935a Add dependabot[bot] to contributors 2023-03-10 11:15:07 +00:00
Nick Craig-Wood
dd6e229327 move: if --check-first and --order-by are set then delete with perfect ordering
If using rclone move with --check-first and --order-by then rclone uses
the transfer routine to delete files to ensure perfect ordering.

This will cause the transfer stats to show a larger than expected
number of items, so this is not enabled by default.

Fixes #6033
2023-03-10 08:23:32 +00:00
dependabot[bot]
4edcd16f5f build(deps): bump github.com/gdamore/tcell/v2 from 2.5.4 to 2.6.0
Bumps [github.com/gdamore/tcell/v2](https://github.com/gdamore/tcell) from 2.5.4 to 2.6.0.
- [Release notes](https://github.com/gdamore/tcell/releases)
- [Changelog](https://github.com/gdamore/tcell/blob/main/CHANGESv2.md)
- [Commits](https://github.com/gdamore/tcell/compare/v2.5.4...v2.6.0)

---
updated-dependencies:
- dependency-name: github.com/gdamore/tcell/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:38:47 +00:00
dependabot[bot]
534e3acd06 build(deps): bump github.com/iguanesolutions/go-systemd/v5
Bumps [github.com/iguanesolutions/go-systemd/v5](https://github.com/iguanesolutions/go-systemd) from 5.1.0 to 5.1.1.
- [Release notes](https://github.com/iguanesolutions/go-systemd/releases)
- [Commits](https://github.com/iguanesolutions/go-systemd/compare/v5.1.0...v5.1.1)

---
updated-dependencies:
- dependency-name: github.com/iguanesolutions/go-systemd/v5
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:38:04 +00:00
dependabot[bot]
cf75ddabd3 build(deps): bump golang.org/x/term from 0.5.0 to 0.6.0
Bumps [golang.org/x/term](https://github.com/golang/term) from 0.5.0 to 0.6.0.
- [Release notes](https://github.com/golang/term/releases)
- [Commits](https://github.com/golang/term/compare/v0.5.0...v0.6.0)

---
updated-dependencies:
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:37:23 +00:00
dependabot[bot]
6edcacf932 build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity
Bumps [github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://github.com/Azure/azure-sdk-for-go) from 1.2.0 to 1.2.2.
- [Release notes](https://github.com/Azure/azure-sdk-for-go/releases)
- [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/sdk/azidentity/v1.2.2/CHANGELOG.md)
- [Commits](https://github.com/Azure/azure-sdk-for-go/compare/v1.2...sdk/azidentity/v1.2.2)

---
updated-dependencies:
- dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azidentity
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:36:23 +00:00
dependabot[bot]
51506a7ccd build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azcore
Bumps [github.com/Azure/azure-sdk-for-go/sdk/azcore](https://github.com/Azure/azure-sdk-for-go) from 1.3.0 to 1.4.0.
- [Release notes](https://github.com/Azure/azure-sdk-for-go/releases)
- [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md)
- [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.3.0...sdk/azcore/v1.4.0)

---
updated-dependencies:
- dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azcore
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:33:40 +00:00
Ryan Caezar Itang
a50fd2a2a2 ci: add dependabot 2023-03-09 15:05:15 +00:00
Ryan Caezar Itang
efac7e18fb ci: add winget releaser workflow 2023-03-09 14:56:37 +00:00
Ryan Caezar Itang
02dd8eacea docs: add winget installation method 2023-03-09 14:56:37 +00:00
Nick Craig-Wood
e2984227bb fs: fix race conditions in --max-delete and --max-delete-size 2023-03-09 09:25:31 +00:00
Nick Craig-Wood
a35ee30d9f Add Leandro Sacchet to contributors 2023-03-09 09:25:31 +00:00
Leandro Sacchet
f689db4422 fs: Add --max-delete-size a delete size threshold
Fixes #3329
2023-03-08 17:12:31 +00:00
Nick Craig-Wood
fb4600f6f9 tree: fix display of files with illegal Windows file system names
Before this change, files with illegal Windows names (eg those
containing \) would not be displayed properly in tree.

This change adds the local encoding to the Windows file names so \
will be displayed as its wide unicode equivalent.

See: https://forum.rclone.org/t/error-with-build-v1-61-1-tree-command-panic-runtime-error-invalid-memory-address-or-nil-pointer-dereference/35922/
2023-03-07 15:30:11 +00:00
Nick Craig-Wood
1d0c75b0c2 ftp: retry errors when initiating downloads
This adds a retry loop to the Open() call in the FTP server so it can
retry failures opening files.

This should make downloading multipart files more reliable.

See: https://forum.rclone.org/t/downloads-fail-from-remote-server-error-426-failure-writing-network-stream/33839/
2023-03-07 12:34:20 +00:00
Nick Craig-Wood
2e435af4de ftp: retry 426 errors
Before this change we didn't retry 426 errors which are

    426 Connection closed; transfer aborted.

Or in this particular case

    426 Failure writing network stream.

These seem like they might be temporary so retry them.

See: https://forum.rclone.org/t/downloads-fail-from-remote-server-error-426-failure-writing-network-stream/33839/
2023-03-07 12:34:20 +00:00
Nick Craig-Wood
62a7765e57 smb: allow SPN (service principal name) to be configured
This enables connection to clusters.

Fixes #6515
2023-03-07 12:18:32 +00:00
Nick Craig-Wood
5ad942ed87 local: fix exclusion of dangling symlinks with -L/--copy-links
Before this fix, a dangling symlink caused the sync to error: it was
writing an ERROR log and causing rclone to exit with an error, even
though the List method wasn't returning an error.

This fix makes sure that we don't log or report a global error on a
file/directory that has been excluded.

This feature was first implemented in:

a61d219bc local: fix -L/--copy-links with filters missing directories

Then fixed in:

8d1fff9a8 local: obey file filters in listing to fix errors on excluded files

This commit also adds test cases for the failure modes of those commits.

See #6376
2023-03-07 12:15:10 +00:00
Nick Craig-Wood
96609e3d6e ftp: revert to upstream github.com/jlaffaye/ftp now fix is merged
This reverts to using the upstream library now that the patch to fix
the hang when using ExplicitTLS with certain servers has been merged.

Fixes #6426
2023-03-07 12:12:07 +00:00
Nick Craig-Wood
28a8ebce5b vfs: fix rename of directory containing files to be uploaded
Before this change, if you renamed a directory containing files yet to
be uploaded and then deleted the directory, the files would still be
uploaded.

This fixes the problem by changing the directory path in all the file
objects in a directory when it is renamed. This wasn't necessary until
we introduced virtual files and directories which lived beyond the
directory flush mechanism.

Fixes #6809
2023-03-07 11:40:50 +00:00
Nick Craig-Wood
17854663de vfs: log size of File and Dir in tests for optimization 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
a4a6b5930a Add Peter Brunner to contributors 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
e9ae620844 Add Ryan Caezar Itang to contributors 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
e7cfb8ad8e Add Ninh Pham to contributors 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
786a1c212c Add Peter Brunner to contributors 2023-03-07 11:40:50 +00:00
Peter Brunner
03bc270730 gcs: fix google cloud storage provider help 2023-03-07 11:39:02 +00:00
Ryan Caezar Itang
7cef042231 docs: add scoop installation method 2023-03-07 11:36:07 +00:00
Ninh Pham
1155cc0d3f drive: Make --drive-stop-on-upload-limit respond to storageQuotaExceeded
Before this change, if "--drive-stop-on-upload-limit" was set,
rclone would not stop the upload if a "storageQuotaExceeded" error occurred.

This fix now checks for the "storageQuotaExceeded" error
when "--drive-stop-on-upload-limit" is set, and fails fast.
2023-03-07 11:00:08 +00:00
Peter Brunner
13c3f67ab0 gcs: add env_auth to pick up IAM credentials from env/instance
This change provides the ability to pass `env_auth` as a parameter to
the google cloud storage provider. This enables the provider to pull IAM
credentials from the environment or instance metadata. Previously if no
auth method was given it would default to requesting oauth.
2023-03-06 18:18:33 +00:00
Nick Craig-Wood
ab2cdd840f serve ftp: fix timestamps older than 1 year in listings
Fixes #6785
2023-03-06 15:59:56 +00:00
Nick Craig-Wood
143285e2b7 vfs: fix incorrect modtime on fs which don't support setting modtime
Before this change we were using the Precision value literally to
round the mod times.

However fs.ModTimeNotSupported is 100y on backends which don't support
setting modtimes so rounding to 100y was producing very strange
results.

See: https://forum.rclone.org/t/saving-files-causes-wrong-modified-time-to-be-set-for-a-few-seconds-on-webdav-mount-with-bitrix24/36451/
2023-03-06 10:54:21 +00:00
Nick Craig-Wood
19e8c8d42a s3: make purge remove directory markers too
See: https://forum.rclone.org/t/cannot-purge-aws-s3/36169/
2023-03-03 15:51:00 +00:00
Nick Craig-Wood
de9c4a3611 s3: use bucket.Join instead of path.Join to preserve paths
Before this change, path.Join would remove the trailing / from objects
which had them. The simplified bucket.Join does not.
2023-03-03 15:51:00 +00:00
Nick Craig-Wood
d7ad13d929 bucket: add Join function for a simplified path.Join 2023-03-03 15:51:00 +00:00
albertony
f9d50f677d lib/terminal: enable windows console virtual terminal sequences processing (ANSI/VT100 colors)
This ensures the virtual terminal processing mode is enabled on the rclone process
for Windows 10 consoles (by using the Windows Console API functions GetConsoleMode/SetConsoleMode
and the flag ENABLE_VIRTUAL_TERMINAL_PROCESSING), which adds native support for ANSI/VT100
escape sequences. This mode is the default in many cases, e.g. when using the Windows
Terminal application, but in other cases it is not, and the default can also be
controlled with a registry setting (see below), so configuring it on the process
seems to be the only reliable way of ensuring it is enabled when supported.

[HKEY_CURRENT_USER\Console]
"VirtualTerminalLevel"=dword:00000001
2023-03-03 12:37:01 +01:00
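To illustrate the mechanism described in the commit above, a minimal Go sketch of enabling virtual terminal processing on stdout via the Windows Console API (an assumption about the approach, not rclone's actual code):

    //go:build windows

    package main

    import (
        "os"

        "golang.org/x/sys/windows"
    )

    // enableVTProcessing turns on ANSI/VT100 escape sequence handling for the
    // console attached to stdout, if stdout is a console.
    func enableVTProcessing() error {
        handle := windows.Handle(os.Stdout.Fd())
        var mode uint32
        if err := windows.GetConsoleMode(handle, &mode); err != nil {
            return err // not a console, e.g. output redirected to a file
        }
        return windows.SetConsoleMode(handle, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
    }

    func main() {
        if err := enableVTProcessing(); err == nil {
            os.Stdout.WriteString("\x1b[32mgreen\x1b[0m\n")
        }
    }
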
albertony
3641993fab tree: fix colored output on windows
Since rclone version 1.61.0 the tree command uses ANSI color sequences in output by
default, but this led to issues in Windows terminals that were not handling these (#6668).

This commit ensures the tree command uses the terminal package for output. It relies on
go-colorable to properly handle ANSI color sequences: if stdout is connected to a terminal
the escape sequences are decoded and the text is written with color formatting using the
Windows Console API. If stdout is not connected to a terminal, e.g. redirected to a file,
the escape sequences are stripped off. The tree command has its own method for writing
directly to a file, specified with the flag --output, and then the output is not passed
through the terminal package and must therefore be written without ANSI codes.
2023-03-03 12:37:01 +01:00
Nick Craig-Wood
93d3ae04c7 deletefile: return error code 4 if file does not exist
Before this change `rclone deletefile` would return error code 1 if
the file it was trying to delete does not exist.

Rclone can't actually tell at this point whether the file doesn't
exist or whether what you tried to delete is a directory, but it seems
more logical to return error code 4 "object not found" here.

See: https://forum.rclone.org/t/rclone-deletefile-cmd-return-exit-code-1-when-file-not-found-in-remote-why-1-and-not-exit-code-4/
2023-03-03 09:51:23 +00:00
Nick Craig-Wood
e25e9fbf22 Add NodudeWasTaken to contributors 2023-03-03 09:51:23 +00:00
NodudeWasTaken
fe26d6116d mega: add --mega-use-https flag
Some ISPs throttle HTTP which MEGA uses by default, so some users may find using HTTPS beneficial.
2023-03-02 20:28:10 +00:00
Fred
06e1e18793 seafile: fix for flaky tests #6799 2023-03-02 20:03:25 +00:00
Nick Craig-Wood
23d17b76be onedrive: default onedrive personal to QuickXorHash
Before this change the hash used for OneDrive Personal was SHA1. From
July 2023 Microsoft is phasing out SHA1 hashes in favour of
QuickXorHash in OneDrive Personal. OneDrive Business and SharePoint
continue to use QuickXorHash as before.

This choice can be changed using the --onedrive-hash-type flag (and
config option) so that SHA1 can be selected while it is still
available in the transition period.

See: https://forum.rclone.org/t/microsoft-is-switching-onedrive-personal-to-quickxorhash-from-sha1/36296/
2023-03-02 19:32:35 +00:00
Nick Craig-Wood
dfe4e78a77 onedrive: add --onedrive-hash-type to change the hash in use
In preparation for Microsoft removing the SHA1 hash on OneDrive
Personal this allows the hash type to be set on OneDrive.

See: https://forum.rclone.org/t/microsoft-is-switching-onedrive-personal-to-quickxorhash-from-sha1/36296/
2023-03-02 19:32:35 +00:00
Nick Craig-Wood
59e7982040 s3: add --s3-sts-endpoint to specify STS endpoint
See: https://forum.rclone.org/t/s3-profile-failing-when-explicit-s3-endpoint-is-present/36063/
2023-03-02 09:56:09 +00:00
Nick Craig-Wood
c6b0587dc0 s3: fix AWS STS failing if --s3-endpoint is set
Before this change, if an --s3-profile which used AWS STS (eg to
assume a role) was set together with --s3-endpoint, then rclone would
use the value from --s3-endpoint to contact the STS server, which did
not work.

This fix implements an endpoint resolver which only overrides the "s3"
service if --s3-endpoint is set. It sends the "sts" service (and any
other service) to the default resolver.

Fixes #6443
See: https://forum.rclone.org/t/s3-profile-failing-when-explicit-s3-endpoint-is-present/36063/
2023-03-01 16:24:40 +00:00
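A hedged sketch of the endpoint resolver idea from the commit above, using the aws-sdk-go v1 endpoints API with illustrative names: only the "s3" service gets the custom endpoint, while "sts" and everything else fall through to the default resolver.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    // s3OnlyResolver overrides the endpoint for the "s3" service only, sending
    // "sts" and every other service to the SDK's default resolver.
    func s3OnlyResolver(customEndpoint string) endpoints.ResolverFunc {
        return func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
            if service == endpoints.S3ServiceID && customEndpoint != "" {
                return endpoints.ResolvedEndpoint{URL: customEndpoint, SigningRegion: region}, nil
            }
            return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
        }
    }

    func main() {
        cfg := aws.Config{EndpointResolver: s3OnlyResolver("https://s3.example.com")}
        _ = cfg // pass cfg when creating the session/client
    }
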
Nick Craig-Wood
9baa4d1c3c accounting: show checking tag if available even on transfers 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
a5390dbbeb sync,operations: fix correct concurrency: use --checkers unless transferring files
There were some places (e.g. deleting files) where we were using
--transfers instead of --checkers to control the concurrency when
files weren't being transferred.

These have been updated to use --checkers.
2023-03-01 11:10:38 +00:00
Nick Craig-Wood
019a486d5b accounting: Make checkers show what they are doing
Before this change, all types of checkers showed "checking" after the
file name despite the fact that not all of them were checking.

After this change, they can show

- checking
- deleting
- hashing
- importing
- listing
- merging
- moving
- renaming

See: https://forum.rclone.org/t/what-is-rclone-checking-during-a-purge/35931/
2023-03-01 11:10:38 +00:00
Nick Craig-Wood
34ce11d2be Add ToBeFree to contributors 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
88e8ede0aa Add Gerard Bosch to contributors 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
f6f250c507 Add logopk to contributors 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
2c45e901f0 Add Hunter Wittenborn to contributors 2023-03-01 11:10:38 +00:00
ToBeFree
9e1443799a docs: crypt: fix typo 2023-02-28 11:50:53 +00:00
Gerard Bosch
dd72aff98a docs: bisync: clarification of --resync 2023-02-28 11:47:28 +00:00
logopk
5039f9be48 docker: fix volume plugin does not remount volume on docker restart
docker volume plugin restoreState: skip fs option if empty

Fixes #6769
Co-authored-by: Peter Kreuser <logo@kreuser.name>
2023-02-28 11:29:07 +00:00
Hunter Wittenborn
56b582cdb9 authorize: add support for custom templates
This adds support for providing custom Go templates for use in the
`rclone authorize` command.

Fixes #6741
2023-02-24 15:08:38 +00:00
Aaron Gokaslan
745c0af571 all: Apply codeql fixes 2023-02-23 10:31:51 +00:00
Nick Craig-Wood
2dabbe83ac serve http: tests for --auth-proxy 2023-02-23 10:28:13 +00:00
Nick Craig-Wood
90561176fb Add Matthias Baur to contributors 2023-02-23 10:28:13 +00:00
Matthias Baur
a0b5d77427 serve http: support --auth-proxy 2023-02-22 14:55:24 +00:00
Manoj Ghosh
ce8b1cd861 oracle-object-storage: bring your own encryption keys 2023-02-21 14:45:02 +00:00
Manoj Ghosh
5bd6e3d1e9 fix vulnerablities: upgrade golang.org/x/net@v0.5.0 to golang.org/x/net@v0.7.0 2023-02-21 10:11:16 +00:00
Nick Craig-Wood
d4d7a6a55e sftp: fix uploads being 65% slower than they should be with crypt
The block size for crypt is 64k + a few bytes. The default block size
for sftp is 32k. This means that the blocks for crypt get split over 3
sftp packets: two of 32k and one of a few bytes.

However due to a bug in pkg/sftp it was sending 32k instead of just a
few bytes, leading to the 65% slowdown.

This was fixed in the upstream library.

This bug probably affected transfers from over-the-network sources
too.

Fixes #6763
See: https://github.com/pkg/sftp/pull/537
2023-02-14 15:47:19 +00:00
Nick Craig-Wood
b3e0672535 s3: Check multipart upload ETag when --s3-no-head is in use
Before this change if --s3-no-head was in use rclone didn't check the
multipart upload ETag at all. However the ETag is returned in the
final POST request when completing the object.

This change uses that ETag from the final POST if --s3-no-head is in
use, otherwise it uses the ETag from a fresh HEAD request.

See: https://forum.rclone.org/t/in-some-cases-rclone-does-not-use-etag-to-verify-files/36095/
2023-02-14 12:04:28 +00:00
Nick Craig-Wood
a407437e92 Add Simmon Li (he/him) to contributors 2023-02-14 12:04:28 +00:00
Manoj Ghosh
0164a4e686 add more documentation around oci authentication methods 2023-02-14 11:58:38 +00:00
Simmon Li (he/him)
b8ea79042c docs: drive: make clear "testing" apps have short token grant time 2023-02-13 14:30:20 +00:00
albertony
49a6533bc1 docs/mount: improve explanation of windows filesystem permissions 2023-02-10 23:21:33 +01:00
Nick Craig-Wood
21459f3cc0 tree: fix nil pointer exception on stat failure
This fixes the crash by updating the upstream.

See: https://forum.rclone.org/t/error-with-build-v1-61-1-tree-command-panic-runtime-error-invalid-memory-address-or-nil-pointer-dereference/35922/
See: https://github.com/a8m/tree/pull/21
2023-02-08 16:21:25 +00:00
albertony
04f7e52803 accounting: show human readable elapsed time when longer than a day - fixes #6748 2023-02-06 15:02:03 +01:00
Kaloyan Raev
25535e5eac storj: update satellite urls and labels
The docs and setup wizard still contained deprecated URLs and labels of
Storj satellites. This change updates them.
2023-02-06 13:18:15 +00:00
Nick Craig-Wood
c37b6b1a43 cache: fix lint error in latest golangci-lint 2023-02-06 10:44:40 +00:00
albertony
0328878e46 accounting: limit length of ETA string
No need to report hours, minutes, and even seconds when the
ETA is several years, e.g. "292y24w3d23h47m16s". Now only
reports the 3 most significant units, sacrificing precision,
e.g. "292y24w3d", "24w3d23h", "3d23h47m", "23h47m16s".

Fixes #6381
2023-02-04 17:29:08 +01:00
albertony
67132ecaec accounting: avoid negative ETA values for very slow speeds
Integer overflow would lead to ETA such as "-255y7w4h11m22s966ms",
as reported in #6381. Now the value will be clipped at the maximum
"292y24w3d23h47m16s", and it will be shown as infinity.
2023-02-04 17:29:08 +01:00
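As a worked illustration of the clamping described above (names made up, not the actual accounting code): converting a huge floating-point seconds value straight into a time.Duration overflows int64, so clamp it first.

    package main

    import (
        "fmt"
        "math"
        "time"
    )

    // etaOf returns the estimated time remaining, clamped to the maximum
    // representable duration so very low speeds never overflow to negative.
    func etaOf(remainingBytes int64, bytesPerSecond float64) time.Duration {
        seconds := float64(remainingBytes) / bytesPerSecond
        if seconds >= float64(math.MaxInt64)/float64(time.Second) {
            return time.Duration(math.MaxInt64) // displayed as infinity
        }
        return time.Duration(seconds * float64(time.Second))
    }

    func main() {
        fmt.Println(etaOf(1<<40, 0.001)) // clamped instead of going negative
    }
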
albertony
120cfcde70 install.sh: fix arm-v6 download 2023-02-04 13:32:26 +01:00
albertony
37db2a0e44 selfupdate: consider arm version 2023-02-04 13:32:26 +01:00
albertony
f92816899c version: report arm version 2023-02-04 13:32:26 +01:00
albertony
5386ffc8f2 build: correct building for ARMv5 and ARMv6
Explicitly set ARM version in GOARM build variable, to avoid relying
on some default value which differs when compiling natively and when
cross-compiling, and which is also incorrectly documented as being
6 when in reality it is 5.

Fix incorrect labelling of ARMv5 builds as ARMv6, and change
architecture of .rpm and .deb packages containing them to
match.

Add ARMv6 builds, to complement existing ARMv5 and ARMv7, and to
reduce disruption due to previous ARMv5 builds incorrectly being
identified as ARMv6, and to provide .rpm and .deb packages with the
same ARMv6 architectures as was previously also published
(then containing ARMv5 binaries).

See #6528

Background info:

https://github.com/golang/go/wiki/GoArm
https://go.dev/doc/install/source#environment
661e931dd1/src/cmd/dist/build.go (L140-L144)
661e931dd1/src/cmd/dist/util.go (L392-L422)
2023-02-04 13:32:26 +01:00
Anagh Kumar Baranwal
3898d534f3 build: update to go1.20
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-02-03 20:15:15 +00:00
Ole Frost
34333d9fa8 docs: added troubleshooting tips for Live Photos in OneDrive 2023-02-03 16:24:30 +00:00
Ole Frost
14e852ee9d s3: fix incorrect tier support for StorJ and IDrive when pointing at a file
Fixes #6734
2023-02-02 18:12:00 +00:00
albertony
37623732c6 build: avoid running workflow twice for pull requests with branch on main repo 2023-02-01 16:47:38 +01:00
Nick Craig-Wood
adbcc83fa5 filter: emit INFO message when can't work out directory filters
See: https://forum.rclone.org/t/rclone-scans-unwanted-folder/34437
2023-02-01 14:21:45 +00:00
Nick Craig-Wood
d4ea6632ca drive: note that --drive-acknowledge-abuse needs SA Manager permission
See: https://github.com/rclone/rclone/issues/2338#issuecomment-762820600
See: https://forum.rclone.org/t/bisync-already-add-drive-acknowledge-abuse-still-got-critical-error-cannotdownloadabusivefile/35604/
2023-02-01 12:11:46 +00:00
Nick Craig-Wood
21849fd0d9 webdav: fix interop with davrods server
The davrods server returns URLs with a double / in them, and the //
confuses rclone into thinking these files are in a directory called "".

The fix removes leading /s from the directory listing names.

See: https://forum.rclone.org/t/upload-to-webdav-does-not-check-if-files-already-exist/35756/
2023-02-01 12:00:25 +00:00
Nick Craig-Wood
ac20ee41ca Add happyxhw to contributors 2023-02-01 12:00:25 +00:00
happyxhw
d376fb1df2 smb: check smb connection is closed - fixes #6735 2023-02-01 08:25:25 +01:00
Nick Craig-Wood
8e63a08d7f docs: note that we have test Android builds 2023-01-31 14:11:50 +00:00
Nick Craig-Wood
3aee5b3c55 Add Simmon Li (he/him) to contributors 2023-01-31 14:11:50 +00:00
Nick Craig-Wood
0145d98314 Add LXY to contributors 2023-01-31 14:11:50 +00:00
Nick Craig-Wood
4c03c71a5f Add Bryan Kaplan to contributors 2023-01-31 14:11:50 +00:00
Simmon Li (he/him)
82e2801aae update drive.md
* Updates OAuth consent screen instructions to include adding scopes for backup purposes (create, edit and delete files).
* Updates instructions to keep app in testing mode (appropriate for most users). The previous instructions suggested this, but we don't need to "publish" the app at all in order to proceed with this step.
2023-01-27 15:25:17 +00:00
LXY
dc5d5de35c onedrive: improve speed of quickxorhash
This commit ports a fast C implementation from https://github.com/namazso/QuickXorHash

It uses new crypto/subtle code from go1.20 to avoid the use of unsafe.

Typical speedups are about 25x when using go1.20

    goos: linux
    goarch: amd64
    cpu: Intel(R) Celeron(R) N5105 @ 2.00GHz
    QuickXorHash-Before  2.49ms   422MB/s ±11%   100.00%
    QuickXorHash-Subtle  87.9µs 11932MB/s ± 5% +2730.83% + 42.17%

Co-Author: @namazso
2023-01-26 11:50:12 +00:00
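The crypto/subtle code mentioned above is presumably subtle.XORBytes, new in go1.20; a trivial standalone usage sketch (unrelated to the actual quickxorhash port):

    package main

    import (
        "crypto/subtle"
        "fmt"
    )

    func main() {
        a := []byte{0x0f, 0xf0, 0xaa}
        b := []byte{0xff, 0x0f, 0x55}
        dst := make([]byte, len(a))
        // XORBytes sets dst[i] = a[i] ^ b[i] without resorting to unsafe.
        n := subtle.XORBytes(dst, a, b)
        fmt.Println(n, dst) // 3 [240 255 255]
    }
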
Bryan Kaplan
41cc4530f3 docs: Improve bisync check-access & check-filename
This commit documents my learnings after having encountered a failure
I reported in the rclone forum[0].

I may be a fool for having failed to understand the previous
documentation, but I am likely not the only fool to get snared by it.

This commit therefore adds details to clarify what the user must do in
order to allow `--check-access` to succeed.

While at it, I've also added some basic documentation for `--check-filename`.

[0]: https://forum.rclone.org/t/bisync-check-file-check-failed/35682
2023-01-26 11:10:01 +00:00
albertony
c5acb10151 fspath: allow the symbols at and plus in remote names - fixes #6710 2023-01-25 13:37:24 +01:00
Manoj Ghosh
8c8ee9905c oracleobjectstorage: speed up operations by using S3 pacer and setting minsleep to 10ms
Uploading 100 files of 1 MB each took 20 seconds before. With the above fix it takes around 2 seconds now.

This is a 10x improvement, in line with the pacer's sleep reduction from 100ms to 10ms.
2023-01-25 10:48:16 +00:00
albertony
e2afd00118 mount: avoid incorrect or premature overlap check on windows
See: #6234
2023-01-24 22:27:02 +01:00
albertony
5b82576dbf build: fix condition for manual workflow run
See #5275
2023-01-24 20:46:33 +01:00
albertony
b9d9f9edb0 docs: use --interactive instead of -i in examples to avoid confusion 2023-01-24 20:43:51 +01:00
Bryan Kaplan
c40b706186 docs: Fix link in bisync doc
This commit fixes the `#check-access` anchor link in the bisync.md document.

`#check-access-option` does not exist in bisync.md; `#check-access` does.
2023-01-24 09:16:43 +01:00
Nick Craig-Wood
351fc609b1 b2: fix uploading files bigger than 1TiB
Before this change when uploading files bigger than 1TiB, the chunk
calculator would work out that the chunk size needed to be bigger than
the default 100 MiB to fit within the 10,000 parts limit.

However the uploader was still using the memory pool for the old chunk
size and this caused errors like

    panic: runtime error: slice bounds out of range [:122683392] with capacity 100663296

The fix for this is to make a temporary pool with the larger chunk
size and use it during the upload of the large file.

See: https://forum.rclone.org/t/rclone-cannot-complete-upload-to-b2-restarts-upload-frequently/35617/
2023-01-22 12:46:23 +00:00
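A rough sketch of the chunk size arithmetic behind the commit above (illustrative names, not the b2 backend's code): with a 10,000 part limit, files over roughly 0.95 TiB need chunks larger than the 100 MiB default.

    package main

    import "fmt"

    // neededChunkSize returns the smallest chunk size that fits the file into
    // at most maxParts parts, never smaller than the default chunk size.
    func neededChunkSize(fileSize, defaultChunk, maxParts int64) int64 {
        chunk := (fileSize + maxParts - 1) / maxParts // ceiling division
        if chunk < defaultChunk {
            return defaultChunk
        }
        return chunk
    }

    func main() {
        const MiB, TiB = int64(1 << 20), int64(1 << 40)
        fmt.Println(neededChunkSize(2*TiB, 100*MiB, 10000) / MiB) // 209 (~210 MiB chunks)
    }
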
Nick Craig-Wood
a6f6a9dcdf mount,mount2,cmount: fix --allow-non-empty #3562
Since version 3 of fuse, libfuse no longer does anything when given the
nonempty option and its default is to allow mounting over non-empty
directories like normal mount does.

Some versions of libfuse give an error when using `--allow-non-empty`
which is annoying for the user.

We now do this check ourselves so we no longer need to pass the option
to libfuse.

Fixes #3562
2023-01-20 15:39:54 +00:00
Nick Craig-Wood
267a09001d mount: fix check for empty mount point on Linux #3562 2023-01-20 15:39:54 +00:00
Nick Craig-Wood
37db2abecd Add alankrit to contributors 2023-01-20 15:39:49 +00:00
albertony
0272d44192 mount: do not treat \\?\ prefixed paths as network share paths on windows
See: #6234
2023-01-20 15:40:03 +01:00
alankrit
6b17044f8e fs: Added multiple CA certificate support. 2023-01-17 12:16:11 +00:00
Nick Craig-Wood
844e8fb8bd lib/errors: add support for unwrapping go1.20 multi errors 2023-01-17 11:35:19 +00:00
Nick Craig-Wood
ca9182d6ae Add IMTheNachoMan to contributors 2023-01-17 11:35:19 +00:00
IMTheNachoMan
ec20c48523 googlephotos: fix grammar in docs (#6699) 2023-01-16 13:40:30 +01:00
Nick Craig-Wood
ec68b72387 lib/file: fix error message test after go1.20 upgrade 2023-01-16 11:19:16 +00:00
Nick Craig-Wood
2d1c2725e4 webdav: fix tests after go1.20 upgrade
Before this change we were sending webdav requests to the go http
FileServer. In go1.20 these (rightly) started returning errors which
caused the tests to fail.

The test has been changed to properly mock up an About query and
response so an end to end test of adding headers is possible.
2023-01-16 11:19:16 +00:00
Nick Craig-Wood
1680c5af8f build: update to go1.20rc3 and make go1.17 the minimum required version 2023-01-16 11:19:16 +00:00
Nick Craig-Wood
88c0d78639 build: update to fuse3 after bazil.org/fuse update 2023-01-16 11:19:16 +00:00
Nick Craig-Wood
559157cb58 azureblob: remove workarounds for SDK bugs after v0.6.1 update 2023-01-16 11:19:16 +00:00
Nick Craig-Wood
10bf8a769e build: update dependencies
This fixes the azureblob backend so it builds again after the SDK
changes.
2023-01-16 11:19:16 +00:00
Fred
f31ab6d178 seafile: renew library password - fixes #6662
Passwords for encrypted libraries are kept in memory in the server
and flushed after an hour.
This MR fixes an issue when the library password expires after 1 hour.
2023-01-15 16:26:29 +00:00
Kaloyan Raev
f08bb5bf66 storj: implement purge 2023-01-15 16:23:49 +00:00
Manoj Ghosh
e2886aaddf oracle-object-storage: expose the storage_tier option in config 2023-01-15 16:20:55 +00:00
albertony
71227986db docs: remove link to nonexistent uploadfile command - fixes #6693 2023-01-12 20:13:02 +01:00
Nick Craig-Wood
8c6ff1fa7e cmount: fix creating and renaming files on case insensitive backends
Before this fix, we told cgofuse/WinFSP that the backend was case
insensitive but didn't implement the Getpath backend function to
return the normalised case of a file.

Recently cgofuse started implementing case insensitive files properly
but since we hadn't implemented Getpath, the file names were taking
the default of all in UPPER CASE.

This patch implements Getpath for cgofuse which fixes the case
problems.

This problem came to light when we upgraded cgofuse and WinFSP (to
1.12) which had the code to implement Getpath.

Fixes #6682
2023-01-11 17:21:57 +00:00
Nick Craig-Wood
9d1b786a39 Add Kaloyan Raev to contributors 2023-01-11 17:21:57 +00:00
Nick Craig-Wood
8ee0e2efb1 Add piyushgarg to contributors 2023-01-11 17:21:57 +00:00
Alex Chen
d66f5e8db0 lib/oauthutil: handle fatal errors better
PR #6678
2023-01-12 00:50:14 +08:00
Ole Frost
02d6d28ec4 crypt: fix for unencrypted directory names on case insensitive remotes
rclone sync erroneously deleted folders renamed to a different case on
crypts where directory name encryption was disabled and the underlying
remote was case insensitive.

Example: Renaming the folder Test to tEST before a sync to a crypt having
remote=OneDrive:crypt and directory_name_encryption=false could result in
the folder and all its content being deleted. The following sync would
correctly create the tEST folder and upload all of the content.

Additional tests have revealed other potential issues when using
filename_encryption=off or directory_name_encryption=false on case
insensitive remotes. The documentation has been updated to warn about
potential problems when using these combinations.
2023-01-11 16:32:40 +00:00
Kaloyan Raev
1cafc12e8c storj: implement public link 2023-01-10 17:40:04 +00:00
piyushgarg
98fa93f6d1 webdav: Document Mapping/Accessing WebDAV shares on windows.
Fixes #6596

Co-authored-by: Piyush <piyushgarg80>
2022-12-30 11:22:46 +00:00
albertony
c6c67a29eb Add Marks Polakovs to contributors 2022-12-26 18:39:49 +01:00
Marks Polakovs
ad5395e953 backend/local: fix %!w(<nil>) in "failed to read directory" error 2022-12-26 18:37:32 +01:00
Nick Craig-Wood
1925ceaade Changelog updates from Version v1.61.1 2022-12-23 18:26:56 +00:00
Nick Craig-Wood
8aebf12797 docs: fix unescaped HTML 2022-12-23 16:53:43 +00:00
Nick Craig-Wood
ffeefe8a56 crypt: obey --ignore-checksum
Before this change the crypt backend would calculate and check upload
checksums regardless of the setting of --ignore-checksum.
2022-12-23 16:52:19 +00:00
Nick Craig-Wood
81ce5e4961 docs: correct RELEASE procedure for stable branch 2022-12-23 12:34:04 +00:00
Nick Craig-Wood
638058ef91 lib/http: shutdown all servers on exit to remove unix socket
Before this change only serve http was shutting down its server, which
was causing other servers such as serve restic to leave behind their
unix sockets.

This change moves the finalisation to lib/http so all servers have it
and removes it from serve http.

Fixes #6648
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
b1b62f70d3 serve webdav: fix running duplicate Serve call
Before this change we were starting the server twice for webdav which
is inefficient and causes problems at exit.
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
823d89af9a serve restic: don't serve via http if serving via --stdio
Before this change, we started the http listener even if --stdio was
supplied.

This also moves the log message so the user won't see the serving via
HTTP message unless they are really using that.

Fixes #6646
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
448fff9a04 serve restic: fix immediate exit when not using stdio
In the lib/http refactor

    52443c2444 restic: refactor to use lib/http

We forgot to serve the data and wait for the server to finish. This is
not tested in the unit tests as it is part of the command line
handler.

Fixes #6644 Fixes #6647
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
6257a6035c serve webdav: fix --baseurl handling after lib/http refactor
The webdav library was confused by the Path manipulation done by
lib/http when stripping the prefix.

This patch adds the prefix back before calling it.

Fixes #6650
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
54c0f17f2a azureblob: fix "409 Public access is not permitted on this storage account"
This error was caused by rclone supplying an empty
`x-ms-blob-public-access:` header when creating a container for
private access, rather than omitting it completely.

This is a valid way of specifying containers should be private, but if
the storage account has the flag "Blob public access" unset then it
gives "409 Public access is not permitted on this storage account".

This patch fixes the problem by only supplying the header if the
access is set.

Fixes #6645
2022-12-23 12:28:07 +00:00
Kaloyan Raev
d049cbb59e s3/storj: update endpoints
Storj switched to a single global s3 endpoint backed by BGP routing.
We want to stop advertising the former regional endpoints and have the
global one as the only option.
2022-12-22 15:46:49 +00:00
Anagh Kumar Baranwal
00e853144e rc: set url to the first value of rc-addr since it has been converted to an array of strings now -- fixes #6641
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2022-12-22 09:02:20 +00:00
albertony
5ac8cfee56 docs: show only significant parts of version number in version introduced label 2022-12-21 12:41:47 +00:00
Nick Craig-Wood
496ae8adf6 Start v1.62.0-DEV development 2022-12-20 18:33:59 +00:00
Nick Craig-Wood
2001cc0831 Version v1.61.0 2022-12-20 17:16:14 +00:00
Ole Frost
a35490bf70 docs: Added note on Box API rate limits 2022-12-20 12:49:31 +00:00
Nick Craig-Wood
01877e5a0f s3: ignore versionIDs from uploads unless using --s3-versions or --s3-version-at
Before this change, when a new object was created s3 returned its
versionID (on a versioned bucket) and rclone recorded it in the
object.

This means that when rclone came to delete the object it would delete
it with the versionID.

However it is common to forbid actions with versionIDs on buckets so
as to preserve the historical record and these operations would fail
whereas they succeeded in pre-v1.60.0 versions.

This patch fixes the problem by not recording versions of objects
supplied by the S3 API on upload unless `--s3-versions` or
`--s3-version-at` is used. This makes rclone behave as it did before
v1.60.0 when version support was introduced.

See: https://forum.rclone.org/t/s3-and-intermittent-403-errors-with-file-renames-and-drag-and-drop-operations-in-windows-explorer/34773
2022-12-17 10:24:56 +00:00
Nick Craig-Wood
614d79121a serve dlna: fix panic: Logger uninitialized.
Before this change we forgot to initialize the logger for the dlna
server. This meant when it needed to log something, it paniced
instead.

See: https://forum.rclone.org/t/rclone-serve-dlna-after-few-hours-of-idle-running-panic-logger-uninitialized-names/34835
2022-12-17 10:23:58 +00:00
Nick Craig-Wood
3a6f1f5cd7 filter: add metadata filters --metadata-include/exclude/filter and friends
Fixes #6353
2022-12-17 10:21:11 +00:00
Nick Craig-Wood
4a31961c4f filter: factor rules into its own file 2022-12-16 17:05:31 +00:00
Abdullah Saglam
7be9855a70 azureblob: implement --use-server-modtime
This patch implements --use-server-modtime for the Azureblob backend.

It does this by not reading the time from the metadata if the global
flag is set.
2022-12-15 15:58:36 +00:00
Nick Craig-Wood
6f8112ff67 Add Abdullah Saglam to contributors 2022-12-15 15:58:36 +00:00
Nick Craig-Wood
67fc227684 config: add config/setpath for setting config path via rc/librclone 2022-12-15 12:41:30 +00:00
Nick Craig-Wood
7edb4c0162 sftp: fix NewObject with leading /
This was breaking the use of operations/stat with remote with an
initial /

See: https://forum.rclone.org/t/rclone-rc-api-operations-stat-is-not-working-for-sftp-remotes/34560
2022-12-15 12:40:59 +00:00
Nick Craig-Wood
5db4493557 lib/http: fix race condition 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
a85c0b0cc2 cmd/serve/httplib: remove as it is now replaced by lib/http 2022-12-15 12:38:09 +00:00
Nolan Woods
52443c2444 restic: refactor to use lib/http
Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2022-12-15 12:38:09 +00:00
Nick Craig-Wood
4444d2d102 serve webdav: refactor to use lib/http 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
08a1ca434b rcd: refactor rclone rc server to use lib/http 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
a9ce86f9a3 lib/http: add UsingAuth method 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
3167292c2f lib/http: remove unused Template from Config 2022-12-15 12:38:09 +00:00
Tom Mombourquette
ec7cc2b3c3 lib/http: Simplify server.go to export an http server rather than an interface
This also makes the implementation public.
2022-12-15 12:38:09 +00:00
Tom Mombourquette
2a2fcf1012 lib/http: rationalise names in test servers to be more consistent 2022-12-15 12:38:09 +00:00
Tom Mombourquette
6d62267227 serve http: support unix sockets and multiple listeners
- add support for unix sockets (which skip the auth).
- add support for multiple listeners
- collapse unnecessary internal structure of lib/http so it can all be
  imported together
- moves files in sub directories of lib/http into the main lib/http
  directory and reworks the code that uses them.

See: https://forum.rclone.org/t/wip-rc-rcd-over-unix-socket/33619
Fixes: #6605
2022-12-15 12:38:09 +00:00
Nick Craig-Wood
dfd8ad2fff Add compiletest target to compile all the tests only 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
43506f8086 test memory: read metadata if -M flag is specified 2022-12-15 12:37:19 +00:00
Nick Craig-Wood
ec3cee89d3 fstest: switch to port forwarding now Owncloud disallows wildcards
A recent security fix in the Owncloud container now causes it to
disallow wildcards in the OWNCLOUD_TRUSTED_DOMAINS setting.

This patch works around the problem by using port forwarding from the
host so we can keep the domain name constant.
2022-12-15 11:34:12 +00:00
Nick Craig-Wood
a171497a8b Add Jack to contributors 2022-12-15 11:34:12 +00:00
Jack
c6ad15e3b8 s3: make DigitalOcean name canonical 2022-12-14 16:35:05 +00:00
Jack
9a81885b51 s3: add DigitalOcean Spaces regions sfo3, fra1, syd1 2022-12-14 16:35:05 +00:00
Nick Craig-Wood
3d291da0f6 azureblob: fix directory marker detection after SDK upgrade
When the SDK was upgraded it started delivering metadata where the
keys were not in lower case as per the old SDK.

Rclone normalises the case of the keys for storage in the Object, but
the directory marker check was being done with the unnormalised keys
as it needs to be done before the Object is created.

This fixes the directory marker check to do a case insensitive compare
of the metadata keys.
2022-12-14 14:24:26 +00:00
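A sketch of the case-insensitive metadata check described above; the key name "hdi_isfolder" and the function are assumptions for illustration, not the backend's actual code.

    package main

    import (
        "fmt"
        "strings"
    )

    // isDirectoryMarker reports whether blob metadata marks a directory,
    // comparing keys case-insensitively since the new SDK no longer
    // lower-cases them.
    func isDirectoryMarker(metadata map[string]*string) bool {
        for k, v := range metadata {
            if strings.EqualFold(k, "hdi_isfolder") && v != nil && *v == "true" {
                return true
            }
        }
        return false
    }

    func main() {
        val := "true"
        fmt.Println(isDirectoryMarker(map[string]*string{"Hdi_isfolder": &val})) // true
    }
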
Nick Craig-Wood
43bf177ff7 s3: fix excess memory usage when using versions
Before this change, we were taking the version ID straight from the
XML blob returned by the SDK and thus pinning the XML into memory
which bulked up the average memory per object from about 400 bytes to
4k.

Copying the string fixes the excess memory usage.
2022-12-14 14:24:26 +00:00
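The Go property behind the fix above: a substring shares its parent string's backing array, so holding the substring keeps the whole parent alive. One way to force an independent copy (a sketch, not necessarily what rclone does):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        bigBlob := strings.Repeat("x", 1<<20) + "abc123"
        // Slicing keeps the whole ~1 MiB blob reachable for as long as the
        // version ID is held; strings.Clone (go1.18+) copies just the bytes
        // needed so the blob can be garbage collected.
        shared := bigBlob[len(bigBlob)-6:]
        independent := strings.Clone(shared)
        fmt.Println(shared, independent) // abc123 abc123
    }
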
Nick Craig-Wood
c446651be8 Revert "s3: turn off list v2 support for Alibaba OSS since it does not work"
This reverts commit 4f386a1ccd.

It turns out that Alibaba OSS does support list v2 and the detection
code was wrong.

This means that users of the gov version of Alibaba will have to add
`list_version 1` to their config files.

See #6600
2022-12-14 14:24:26 +00:00
Nick Craig-Wood
6c407dbe15 s3: fix detection of listing routines which don't support v2 properly
In this commit

ab849b3613 s3: fix listing loop when using v2 listing on v1 server

The ContinuationToken was tested for existence, but it is the
NextContinuationToken that we are interested in.

See: #6600
2022-12-14 14:24:26 +00:00
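A hedged sketch of the corrected detection, using aws-sdk-go v1 types and an illustrative function name: a truncated V2 response from a genuine V2 server carries NextContinuationToken, so its absence signals a V1-only server that would otherwise cause a listing loop.

    package s3detect

    import (
        "errors"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    // checkListV2Supported flags the case where a V1-only server has ignored
    // the V2 parameters: the listing is truncated but no NextContinuationToken
    // is returned, so paging with V2 would repeat the first page forever.
    func checkListV2Supported(resp *s3.ListObjectsV2Output) error {
        if aws.BoolValue(resp.IsTruncated) && resp.NextContinuationToken == nil {
            return errors.New("server does not appear to support ListObjects V2 - try setting list_version to 1")
        }
        return nil
    }
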
albertony
5a59b49b6b drive: handle shared drives with leading/trailing space in name (related to #6618) 2022-12-14 10:18:12 +01:00
albertony
8b9f3bbe29 fspath: improved detection of illegal remote names starting with dash (related to #4261) 2022-12-14 10:18:12 +01:00
albertony
8e6a469f98 fspath: allow unicode numbers and letters in remote names
Previously it was limited to plain ASCII (0-9, A-Z, a-z).

Implemented by adding \p{L}\p{N} alongside the \w in the regex,
even though these overlap it means we can be sure it is 100%
backwards compatible.

Fixes #6618
2022-12-12 13:24:32 +00:00
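A small illustrative sketch of the character class change; the pattern here is simplified, not rclone's actual remote name regex.

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // \w covers ASCII word characters; \p{L} (letters) and \p{N} (numbers)
        // extend the class to all Unicode letters and digits.
        name := regexp.MustCompile(`^[\w\p{L}\p{N}][\w\p{L}\p{N}. -]*$`)
        fmt.Println(name.MatchString("my-remote1")) // true, as before
        fmt.Println(name.MatchString("配置"))        // true, now allowed
        fmt.Println(name.MatchString(" leading"))   // false, leading space
    }
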
albertony
f650a543ef docs: remote names may not start or end with space 2022-12-12 13:24:32 +00:00
albertony
683178a1f4 fspath: change remote name regex to not match when leading/trailing space 2022-12-12 13:24:32 +00:00
albertony
3937233e1e fspath: refactor away unnecessary constant for remote name regex 2022-12-12 13:24:32 +00:00
albertony
c571200812 fspath: remove unused capture group in remote name regex 2022-12-12 13:24:32 +00:00
albertony
04a663829b fspath: remove duplicate start-of-line anchor in remote name regex 2022-12-12 13:24:32 +00:00
albertony
6b4a2c1c4e fspath: remove superfluous underscore covered by existing word character class in remote name regex 2022-12-12 13:24:32 +00:00
albertony
f73be767a4 fspath: add unit tests for remote names with leading dash 2022-12-12 13:24:32 +00:00
albertony
4120dffcc1 fspath: add unit tests for remote names with space 2022-12-12 13:24:32 +00:00
Nick Craig-Wood
53ff5bb205 build: Update golang.org/x/net/http2 to fix GO-2022-1144
An attacker can cause excessive memory growth in a Go server accepting
HTTP/2 requests. HTTP/2 server connections contain a cache of HTTP
header keys sent by the client. While the total number of entries in
this cache is capped, an attacker sending very large keys can cause
the server to allocate approximately 64 MiB per open connection.
2022-12-12 12:49:12 +00:00
Nick Craig-Wood
397f428c48 Add vanplus to contributors 2022-12-12 12:49:12 +00:00
vanplus
c5a2c9b046 onedrive: document workaround for shared with me files 2022-12-12 12:04:28 +00:00
Kaloyan Raev
b98d7f6634 storj: implement server side Copy 2022-12-12 12:02:38 +00:00
Ole Frost
beea4d5119 lib/oauthutil: Improved usability of config flows needing web browser
The config question "Use auto config?" confused many users and lead to
recurring forum posts from users that were unaware that they were using
a remote or headless machine.

This commit makes the question and possible options more descriptive
and precise.

This commit also adds references to the guide on remote setup in the
documentation of backends using oauth as primary authentication.
2022-12-09 14:41:05 +00:00
Eng Zer Jun
8e507075d1 test: replace defer cleanup with t.Cleanup
Reference: https://pkg.go.dev/testing#T.Cleanup
Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2022-12-09 14:38:05 +00:00
Nick Craig-Wood
be783a1856 dlna: properly attribute code used from https://github.com/anacrolix/dms
Fixes #4101
2022-12-09 14:27:10 +00:00
Nick Craig-Wood
450c366403 s3: fix nil pointer exception when using Versions
This was caused by

a9bd0c8de6 s3: reduce memory consumption for s3 objects

Which assumed that the StorageClass would always be set, but it isn't
set for Versions.
2022-12-09 12:23:51 +00:00
Matthew Vernon
1dbdc48a77 WASM: comply with wasm_exec.js licence terms
The BSD-style license that Go uses requires the license to be included
with the source distribution; so add it as LICENSE.wasmexec (to avoid
confusion with the other licenses in rclone) and note the location of
the license in wasm_exec.js itself.
2022-12-07 15:25:46 +00:00
Nick Craig-Wood
d7cb17848d azureblob: revamp authentication to include all methods and docs
This updates the authentication to include

- Auth from the environment
    1. Environment Variables
    2. Managed Service Identity Credentials
    3. Azure CLI credentials (as used by the az tool)
- Account and Shared Key
- SAS URL
- Service principal with client secret
- Service principal with certificate
- User with username and password
- Managed Service Identity Credentials

And rationalises the auth order.
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
f3c8b7a948 azureblob: add --azureblob-no-check-container to assume container exists
Normally rclone will check the container exists before uploading if it
hasn't listed the container yet.

Often rclone will be running with a limited set of permissions which
means rclone can't create the container anyway, so this stops the
check.

This will save a transaction.
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
914fbe242c azureblob: ignore AuthorizationFailure when trying to create a container
If we get AuthorizationFailure when trying to create a container, then
assume the container has already been created
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
f746b2fe85 azureblob: port old authentication methods to new SDK
Co-authored-by: Brad Ackerman <brad@facefault.org>
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
a131da2c35 azureblob: Port to new SDK
This commit switches from using the old Azure go modules

    github.com/Azure/azure-pipeline-go/pipeline
    github.com/Azure/azure-storage-blob-go/azblob
    github.com/Azure/go-autorest/autorest/adal

To the new SDK

    github.com/Azure/azure-sdk-for-go/

This stops rclone using deprecated code and enables the full range of
authentication with Azure.

See #6132 and #5284
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
60e4cb6f6f Add MohammadReza to contributors 2022-12-06 15:06:51 +00:00
MohammadReza
0a8b1fe5de s3: add Liara LOS to provider list 2022-12-06 12:25:23 +00:00
asdffdsazqqq
b24c83db21 restic: fix typo in docs 'remove' should be 'remote' 2022-12-06 12:14:25 +00:00
Nick Craig-Wood
4f386a1ccd s3: turn off list v2 support for Alibaba OSS since it does not work
See: #6600
2022-12-06 12:11:21 +00:00
Nick Craig-Wood
ab849b3613 s3: fix listing loop when using v2 listing on v1 server
Before this change, rclone would enter a listing loop if it used v2
listing on a v1 server and the list exceeded 1000 items.

This change detects the problem and gives the user a helpful message.

Fixes #6600
2022-12-06 12:11:21 +00:00
Nick Craig-Wood
10aee3926a Add Kevin Verstaen to contributors 2022-12-06 12:11:21 +00:00
Nick Craig-Wood
4583b61e3d Add Erik Agterdenbos to contributors 2022-12-06 12:11:06 +00:00
Nick Craig-Wood
483e9e1ee3 Add ycdtosa to contributors 2022-12-06 12:11:06 +00:00
Kevin Verstaen
c2dfc3e5b3 fs: Add global flag '--color' to control terminal colors
* fs: add TerminalColorMode type
* fs: add new config(flags) for TerminalColorMode
* lib/terminal: use TerminalColorMode to determine how to handle colors
* Add documentation for '--terminal-color-mode'
* tree: remove obsolete --color replaced by global --color

This changes the default behaviour of tree. It now displays colors by
default instead of only displaying them when the flag -C/--color was
active. Old behaviour (no color) can be achieved by setting --color to
'never'.

Fixes: #6604
2022-12-06 12:07:06 +00:00
Erik Agterdenbos
a9bd0c8de6 s3: reduce memory consumption for s3 objects
This copies the storageClass string instead of using a pointer to the
original string, which prevents the Go garbage collector from keeping
large amounts of XMLNode structs and references in memory, created by
xmlutil.XMLToStruct() from the aws-sdk-go.
2022-12-05 23:07:08 +00:00
Anthony Pessy
1628ca0d46 ftp: Improve performance to speed up --files-from and NewObject
This commit uses the MLST command (where available) to get the status
for single files rather than listing the parent directory and looking
for the file. This makes actions such as using `--files-from` much quicker.

* use getEntry to lookup remote files when supported
*  findItem now expects the full path directly

It makes the expected argument similar to the getInfo method, the
difference now is that one is returning a FileInfo whereas
the other is returning an ftp Entry.

Fixes #6225

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2022-12-05 16:19:04 +00:00
albertony
313493d51b docs: remove minimum versions from command pages of pre v1 commands 2022-12-03 18:58:55 +01:00
albertony
6d18f60725 docs: add minimum versions to the command pages 2022-12-03 18:58:55 +01:00
albertony
d74662a751 docs: add badge showing version introduced and experimental/beta/deprecated status to command doc pages 2022-12-03 18:58:55 +01:00
albertony
d05fd2a14f docs: add badge for experimental/beta/deprecated status next to version in backend docs 2022-12-03 18:58:55 +01:00
albertony
097be753ab docs: minor cleanup of headers in backend docs 2022-12-03 18:58:55 +01:00
ycdtosa
50c9678cea ftp: update help text of implicit/explicit TLS options to refer to FTPS instead of FTP 2022-11-29 14:58:46 +01:00
eNV25
7672cde4f3 cmd/ncdu: use negative values for key runes
The previous version used values after the maximum Unicode code-point
to encode a key. This could lead to an overflow, since a key is an
int16, a rune is an int32, and the maximum Unicode code-point is
larger than the largest int16 value.

A better solution is to simply use negative runes for keys.
2022-11-28 10:51:11 +00:00
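A tiny sketch of the scheme described above, with invented constant names: synthetic key codes get negative rune values, which can never collide with real code points since those are all non-negative.

    package main

    import "fmt"

    // Synthetic key identifiers encoded as negative runes. Real characters
    // always decode to runes >= 0, so there is no possible collision, and the
    // small negative values fit comfortably in an int16.
    const (
        keyArrowUp rune = -(iota + 1) // -1
        keyArrowDown                  // -2
        keyPageUp                     // -3
    )

    func main() {
        fmt.Println(keyArrowUp, keyArrowDown, keyPageUp, int16(keyPageUp))
    }
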
eNV25
a4c65532ea cmd/ncdu: use tcell directly instead of the termbox wrapper
Following up on 36add0af, which switched from termbox
to tcell's termbox wrapper.
2022-11-25 14:42:19 +00:00
Nick Craig-Wood
46b080c092 vfs: Fix IO Error opening a file with O_CREATE|O_RDONLY in --vfs-cache-mode not full
Before this fix, opening a file with `O_CREATE|O_RDONLY` caused an IO error to
be returned when using `--vfs-cache-mode off` or `--vfs-cache-mode writes`.

This was because the file was opened with read intent, but the `O_CREATE`
implies write intent to create the file even though the file is opened
`O_RDONLY`.

This fix sets write intent for the file if `O_CREATE` is passed in which fixes
the problem for all the VFS cache modes.

It also extends the exhaustive open flags testing to `--vfs-cache-mode writes`
as well as `--vfs-cache-mode full` which would have caught this problem.

See: https://forum.rclone.org/t/i-o-error-trashing-file-on-sftp-mount/34317/
2022-11-24 17:04:36 +00:00
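A sketch of the intent check described above; the flag mask and function name are illustrative, and the VFS's real logic is more involved.

    package main

    import (
        "fmt"
        "os"
    )

    // hasWriteIntent reports whether open flags imply writing. O_CREATE counts
    // as write intent even when the access mode is O_RDONLY, because creating
    // the file means it must be written back to the remote.
    func hasWriteIntent(flags int) bool {
        if flags&(os.O_WRONLY|os.O_RDWR) != 0 {
            return true
        }
        return flags&(os.O_CREATE|os.O_TRUNC|os.O_APPEND) != 0
    }

    func main() {
        fmt.Println(hasWriteIntent(os.O_RDONLY))               // false
        fmt.Println(hasWriteIntent(os.O_CREATE | os.O_RDONLY)) // true
    }
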
Nick Craig-Wood
0edf6478e3 Add Nathaniel Wesley Filardo to contributors 2022-11-24 17:04:36 +00:00
Nathaniel Wesley Filardo
f7cdf318db azureblob: support simple "environment credentials"
As per
https://learn.microsoft.com/en-us/dotnet/api/azure.identity.environmentcredential?view=azure-dotnet

This supports only AZURE_CLIENT_SECRET-based authentication, as with the
existing service principal support.

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2022-11-24 12:06:14 +00:00
Nathaniel Wesley Filardo
6f3682c12f azureblob: make newServicePrincipalTokenRefresher take parsed principal structure 2022-11-24 12:06:14 +00:00
Nick Craig-Wood
e3d593d40c build: update dependencies 2022-11-24 11:05:54 +00:00
Nick Craig-Wood
83551bb02e cmount: update cgofuse for FUSE-T support for mounting volumes on Mac
See: https://forum.rclone.org/t/fr-fuse-t-support-for-mounting-volumes-on-mac/33110/
2022-11-24 10:51:16 +00:00
Nick Craig-Wood
430bf0d5eb crypt: fix compress wrapping crypt giving upload errors
Before this fix a chain compress -> crypt -> s3 was giving errors

    BadDigest: The Content-MD5 you specified did not match what we received.

This was because the crypt backend was encrypting the underlying local
object to calculate the hash rather than the contents of the metadata
stream.

It did this because the crypt backend incorrectly identified the
object as a local object.

This fixes the problem by making sure the crypt backend does not
unwrap anything but fs.OverrideRemote objects.

See: https://forum.rclone.org/t/not-encrypting-or-compressing-before-upload/32261/10
2022-11-21 08:02:09 +00:00
Nick Craig-Wood
dd71f5d968 fs: move operations.NewOverrideRemote to fs.NewOverrideRemote 2022-11-21 08:02:09 +00:00
albertony
7db1c506f2 smb: fix issue where spurious dot directory is created 2022-11-20 17:12:02 +00:00
Nick Craig-Wood
959cd938bc docs: Add minimum versions to all the backend pages and some of the other pages 2022-11-18 14:41:24 +00:00
Nick Craig-Wood
03b07c280c Changelog updates from Version v1.60.1 2022-11-17 16:32:25 +00:00
Nick Craig-Wood
705e8f2fe0 smb: fix Failed to sync: context canceled at the end of syncs
Before this change we were putting connections into the connection
pool which had a local context in them.

This meant that when the operation had finished the context was
cancelled and the connection became unusable.

See: https://forum.rclone.org/t/failed-to-sync-context-canceled/34017/
2022-11-16 10:55:25 +00:00
Nick Craig-Wood
591fc3609a vfs: fix deadlock caused by cache cleaner and upload finishing
Before this patch a deadlock could occur if the cache cleaner was
running when an object upload finished.

This fixes the problem by delaying marking the object as clean until
we have notified the VFS layer. This means that the cache cleaner
won't consider the object until **after** the VFS layer has been
notified, thus avoiding the deadlock.

See: https://forum.rclone.org/t/rclone-mount-deadlock-when-dir-cache-time-strikes/33486/
2022-11-15 18:01:36 +00:00
Nick Craig-Wood
b4a3d1b9ed Add asdffdsazqqq to contributors 2022-11-15 18:01:36 +00:00
asdffdsazqqq
84219b95ab docs: faq: how to use a proxy server that requires a username and password - fixes #6565 2022-11-15 17:58:43 +00:00
Nick Craig-Wood
2c78f56d48 webdav: fix Move/Copy/DirMove when using -server-side-across-configs
Before this change, when using -server-side-across-configs rclone
would direct Move/Copy/DirMove to the destination server.

However this should be directed to the source server. This is a little
unclear in the RFC, but the name of the parameter "Destination:" seems
clear and this is how dCache and Rucio have implemented it.

See: https://forum.rclone.org/t/webdav-copy-request-implemented-incorrectly/34072/
2022-11-15 09:51:30 +00:00
Nick Craig-Wood
a61d219bcd local: fix -L/--copy-links with filters missing directories
In this commit

8d1fff9a82 local: obey file filters in listing to fix errors on excluded files

We introduced the concept of local backend filters.

Unfortunately the filters were being applied before we had resolved
the symlink to point to a directory. This meant that symlinks pointing
to directories were filtered out when they shouldn't have been.

This was fixed by moving the filter check until after the symlink had
been resolved.

See: https://forum.rclone.org/t/copy-links-not-following-symlinks-on-1-60-0/34073/7
2022-11-14 18:03:40 +00:00
Nick Craig-Wood
652d3cdee4 vfs: windows: fix slow opening of exe files by not truncating files when not necessary
Before this change we truncated files in the backing store regardless
of whether we needed to or not.

After, we check to see if the file is the right size and don't
truncate if it is.

Apparently Windows Defender likes to check executables each time they
are modified, and truncating a file to its existing size is enough to
trigger the Windows Defender scan. This was causing a big slowdown for
operations which opened and closed the file a lot, such as looking at
properties on an executable.

See: https://forum.rclone.org/t/for-mount-sftp-why-right-click-on-exe-file-is-so-slow-until-it-freezes/33830
2022-11-14 17:05:51 +00:00
Nick Craig-Wood
bb1fc5b86d Add Kamui to contributors 2022-11-14 17:05:51 +00:00
Kamui
efd3c6449b rcserver: avoid generating default credentials with htpasswd - fixes #4839 2022-11-14 15:26:44 +00:00
Nick Craig-Wood
0ac5795f8c fs: make all duration flags take y, M, w, d etc suffixes
Fixes #6556
2022-11-14 15:13:49 +00:00
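As an illustrative example only (the remote name and paths are placeholders), an existing duration flag such as --max-age should now accept the longer suffixes:

    rclone copy remote:path /local/path --max-age 2w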
Nick Craig-Wood
2f77651f64 Add rkettelerij to contributors 2022-11-14 15:13:49 +00:00
Nick Craig-Wood
8daacc2b99 Add techknowlogick to contributors 2022-11-14 15:13:49 +00:00
rkettelerij
87fa9f8e46 azureblob: Add support for custom upload headers 2022-11-14 15:12:28 +00:00
albertony
1392793334 sftp: auto-detect shell type for fish
Fish is different from POSIX-based Unix shells such as bash,
and bracketed variable references like the one we use for the
auto-detection echo command are not supported. The command
will return with a zero exit code but produce no output on
stdout. There is a message on stderr, but we don't log it
due to the zero exit code:

fish: Variables cannot be bracketed. In fish, please use {$ShellId}.

Fixes #6552
2022-11-11 15:32:44 +00:00
techknowlogick
0e427216db s3: Add additional Wasabi locations 2022-11-11 14:39:12 +00:00
Anagh Kumar Baranwal
0c56c46523 rc: Add commands to set GC Percent & Memory Limit (1.19+)
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2022-11-10 12:07:18 +00:00
Nick Craig-Wood
617c5d5e1b rcat: preserve metadata when Copy falls back to Rcat
Before this change if we copied files of unknown size, then they lost
their metadata.

This was particularly noticeable using --s3-decompress.

This change adds metadata to Rcat and RcatSized and changes Copy to
pass the metadata in when it calls Rcat for an unknown sized input.

Fixes #6546
2022-11-10 12:04:35 +00:00
Nick Craig-Wood
ec2024b907 fstest: use WithMetadata / WithMimeType 2022-11-10 12:04:35 +00:00
Nick Craig-Wood
458845ce89 fs/object: add WithMetadata and WithMimetype to static and memory objects 2022-11-10 12:04:35 +00:00
Nick Craig-Wood
57bde20acd Add Aaron Gokaslan to contributors 2022-11-10 12:04:35 +00:00
Aaron Gokaslan
b0248e8070 s3: fix for unchecked err value in s3 listv2 2022-11-10 11:52:59 +00:00
Nick Craig-Wood
b285efb476 mailru: allow timestamps to be before the epoch 1970-01-01
Fixes #6547
2022-11-10 11:27:01 +00:00
Nick Craig-Wood
be6f29930b dedupe: make dedupe obey the filters
See: https://forum.rclone.org/t/dial-tcp-lookup-api-pcloud-com-no-such-host/33910/
2022-11-10 09:56:02 +00:00
Nick Craig-Wood
653bc23728 dedupe: count Checks in the stats while scanning for duplicates
This allows the user to see rclone has not hung.

See: https://forum.rclone.org/t/dial-tcp-lookup-api-pcloud-com-no-such-host/33910/
2022-11-10 09:56:02 +00:00
Nick Craig-Wood
47b04580db accounting: make it so we can account directories as well as files 2022-11-10 09:56:02 +00:00
Nick Craig-Wood
919e28b8bf lib/cache: fix alias backend shutting down too soon
Before this patch, when an alias backend was created it would be
renamed to be canonical and in the process Shutdown would be called on
it. This was particularly noticeable with the dropbox backend which
gave this error when uploading files after the backend was Shutdown.

    Failed to copy: upload failed: batcher is shutting down

This patch fixes the cache Rename code not to finalize objects if the
object that is being overwritten is the same as the existing object.

See: https://forum.rclone.org/t/upload-failed-batcher-is-shutting-down/33900
2022-11-09 16:29:23 +00:00
Nick Craig-Wood
3a3bc5a1ae mailru: note that an app password is now needed - fixes #6398 2022-11-08 20:33:11 +00:00
Nick Craig-Wood
133c006c37 Add Roel Arents to contributors 2022-11-08 20:33:11 +00:00
Roel Arents
e455940f71 azureblob: allow emulator account/key override 2022-11-08 20:24:06 +00:00
Nick Craig-Wood
65528fd009 docs: remove link to rclone slack as it is no longer supported 2022-11-08 16:11:34 +00:00
Nick Craig-Wood
691159fe94 s3: allow Storj to server side copy since it seems to work now - fixes #6550 2022-11-08 16:05:24 +00:00
Nick Craig-Wood
09858c0c5a Add Arnie97 to contributors 2022-11-08 16:05:24 +00:00
Nick Craig-Wood
5fd0abb2b9 Add x3-apptech to contributors 2022-11-08 16:05:24 +00:00
Arnie97
36c37ffec1 backend/http: rename stat to decodeMetadata 2022-11-08 13:04:17 +00:00
Arnie97
6a5b7664f7 backend/http: support content-range response header 2022-11-08 13:04:17 +00:00
Arnie97
ebac854512 backend/http: do not update object size based on range requests 2022-11-08 13:04:17 +00:00
Arnie97
cafce96185 backend/http: parse get responses when no_head is set 2022-11-08 13:04:17 +00:00
João Henrique Franco
92ffcf9f86 wasm: fix walltime link error by adding up-to-date wasm_exec.js
Solves link error while running rclone's wasm version. Go's `walltime1` function was renamed to `walltime`. This commit updates wasm_exec.js with the new name.
2022-11-07 12:13:23 +00:00
albertony
64cdbb67b5 ncdu: add support for modification time 2022-11-07 11:57:44 +00:00
albertony
528fc899fb ncdu: fallback to sort by name also for sort by average size 2022-11-07 11:57:44 +00:00
x3-apptech
d452f502c3 cmd: Enable SIGINFO (Ctrl-T) handler on FreeBSD, NetBSD, OpenBSD and Dragonfly BSD 2022-11-07 11:45:04 +00:00
albertony
5d6b8141ec Replace deprecated ioutil
As of Go 1.16, the same functionality is now provided by package io or
package os, and those implementations should be preferred in new code.
2022-11-07 11:41:47 +00:00
albertony
776e5ea83a docs: fix character that was incorrectly interpreted as markdown 2022-11-07 08:59:40 +01:00
albertony
c9acc06a49 Add Clément Notin to contributors 2022-11-07 08:51:49 +01:00
Clément Notin
a2dca02594 docs: fix character that was incorrectly interpreted as markdown 2022-11-07 08:50:21 +01:00
Joda Stößer
210331bf61 docs: fix typo remove in rclone_serve_restic command 2022-11-07 08:46:05 +01:00
Nick Craig-Wood
5b5fdc6bc5 s3: add provider quirk --s3-might-gzip to fix corrupted on transfer: sizes differ
Before this change, some files were giving this error when downloaded
from Cloudflare and other providers.

    ERROR corrupted on transfer: sizes differ NNN vs MMM

This is because these providers automatically gzip the object when
rclone wasn't expecting it to. (AWS does not gzip objects unless they
were uploaded gzipped.)

This patch adds a quirk to fix the problem and a flag to control it.
The quirk `might_gzip` is set to `true` for all providers except
AWS.

See: https://forum.rclone.org/t/s3-error-corrupted-on-transfer-sizes-differ-nnn-vs-mmm/33694/
Fixes: #6533
2022-11-04 16:53:32 +00:00
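For illustration only (remote name and path are placeholders), the quirk can be overridden explicitly on the command line if the default for a provider is wrong:

    rclone copy s3remote:bucket/path /local/path --s3-might-gzip=true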
Nick Craig-Wood
0de74864b6 Add dgouju to contributors 2022-11-04 16:53:32 +00:00
dgouju
7042a11875 sftp: add configuration options to set ssh Ciphers / MACs / KeyExchange 2022-11-03 17:11:28 +00:00
Nick Craig-Wood
028832ce73 s3: if bucket or object ACL is empty string then don't add X-Amz-Acl: header - fixes #5730
Before this fix it was impossible to stop rclone generating an
X-Amz-Acl: header which is incompatible with GCS with uniform access
control and is generally deprecated at AWS.
2022-11-03 17:06:24 +00:00
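A hedged example of the new behaviour (remote, bucket and path are placeholders): passing an empty ACL suppresses the X-Amz-Acl: header entirely:

    rclone copy /local/path s3remote:bucket/path --s3-acl ""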
Philip Harvey
c7c9356af5 s3: stop setting object and bucket ACL to "private" if it is an empty string #5730 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
3292c112c5 Add Philip Harvey to contributors 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
126d71b332 Add Anthony Pessy to contributors 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
df9be72a82 Add coultonluke to contributors 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
6aa8f7409a Add Samuel Johnson to contributors 2022-11-03 17:06:24 +00:00
Anthony Pessy
10c884552c s3: use different strategy to resolve s3 region
The API endpoint GetBucketLocation requires
top level permission.

If we do an authenticated head request to a bucket, the bucket location will be returned in the HTTP headers.

Fixes #5066
2022-11-02 11:48:08 +00:00
albertony
2617610741 docs: add direct download link for windows arm64 2022-10-31 21:14:10 +01:00
coultonluke
53dd174f3d docs: corrected download links in windows install docs 2022-10-31 21:09:53 +01:00
albertony
65987f5970 lib/file: improve error message for create dir on non-existent network host on windows (#6420) 2022-10-28 21:00:22 +02:00
Manoj Ghosh
1fc864fb32 oracle-object-storage: doc fix
See #6521
2022-10-28 20:32:17 +02:00
albertony
22abcc9fd2 build: update golang.org/x/net dependency
This fixes vulnerability GO-2022-0969 reported by govulncheck:

HTTP/2 server connections can hang forever waiting for a clean
shutdown that was preempted by a fatal error. This condition can
be exploited by a malicious client to cause a denial of service.

Call stacks in your code:
Error: cmd/serve/restic/restic.go:150:22: github.com/rclone/rclone/cmd/serve/restic.init$1$1 calls golang.org/x/net/http2.Server.ServeConn

Found in: golang.org/x/net/http2@v0.0.0-20220805013720-a33c5aa5df48
Fixed in: golang.org/x/net/http2@v0.0.0-20220906165146-f3363e06e74c
More info: https://pkg.go.dev/vuln/GO-2022-0969
2022-10-26 12:59:31 +02:00
albertony
178cf821de build: add vulnerability testing using govulncheck 2022-10-26 12:59:31 +02:00
albertony
f4a571786c local: clean absolute paths - fixes #6493 2022-10-25 21:09:56 +02:00
albertony
c0a8ffcbef build: setup-go v3 improved semver notation 2022-10-25 20:25:39 +02:00
albertony
76eeca9eae build: setup-go v3 dropped the stable input 2022-10-25 20:25:39 +02:00
Samuel Johnson
8114744bce docs: Update faq.md with bisync
Updated FAQ to clarify that experimental bi-sync is now available.
2022-10-23 11:15:09 +01:00
Nick Craig-Wood
db5d582404 Start v1.61.0-DEV development 2022-10-21 16:15:53 +01:00
Nick Craig-Wood
01dbbff62e Version v1.60.0 2022-10-21 15:06:08 +01:00
Nick Craig-Wood
afa61e702c docs: remove hosted by tag as server has moved 2022-10-21 12:49:10 +01:00
Nick Craig-Wood
546dc82793 Add Robert Newson to contributors 2022-10-21 12:49:10 +01:00
Nick Craig-Wood
d9c4d95ab3 Add Tom Mombourquette to contributors 2022-10-21 12:49:10 +01:00
Nick Craig-Wood
0fb1b75a02 Add Manoj Ghosh to contributors 2022-10-21 12:49:10 +01:00
Tom Mombourquette
38f1f5b177 rc: Fix mount/listmounts not returning the full Fs entered in mount/mount 2022-10-21 12:48:27 +01:00
Ole Frost
0d2a62a927 docs: Describe connection strings in alias backend 2022-10-21 12:47:51 +01:00
Manoj Ghosh
b75c207208 oracle-object-storage: overview, docs update 2022-10-21 12:47:03 +01:00
Ole Frost
dff223f195 install.sh: fix arm-v7 download 2022-10-21 12:35:58 +01:00
Robert Newson
d2fef05fe4 httplib: Add --xxx-min-tls-version option to select minimum tls values for HTTP servers
This allows administrators to disable TLS 1.0 and 1.1, for example.

Example:

rclone rcd --rc-min-tls-version=tls1.2 --rc-cert <cert> --rc-key <key>
2022-10-19 17:13:12 +01:00
Tom Mombourquette
188b9f8cf1 rc: corrected mount/unmountall help msg and title 2022-10-17 17:34:48 +01:00
Manoj Ghosh
daf3162bcf oracle-object-storage: minor docs update 2022-10-17 17:08:44 +01:00
Nick Craig-Wood
5e59e7f442 ftp: Fix hang when using ExplicitTLS to certain servers.
It was discovered that doing the tls Handshake immediately on
connection causes some FTP servers (proftpd and pureftpd) to hang.

This imports a fix for it by temporarily hard forking jlaffaye/ftp to
include the fix submitted as a pull request.

See: https://forum.rclone.org/t/rclone-ftps-explicit-rclone-touch-empty-files-proftpd-unable-to-build-data-connection-operation-not-permitted/22522
See: https://github.com/rclone/rclone/issues/6426#issuecomment-1243993039
See: https://github.com/jlaffaye/ftp/pull/283
See: https://github.com/jlaffaye/ftp/issues/282
2022-10-14 12:10:03 +01:00
Nick Craig-Wood
fce22c0065 s3: add --s3-no-system-metadata to suppress read and write of system metadata
See: https://forum.rclone.org/t/problems-with-content-disposition-and-backblaze-b2-using-s3/33292/
2022-10-14 11:12:04 +01:00
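For illustration (remote and paths are placeholders), the flag is passed like any other backend flag:

    rclone copy /local/path s3remote:bucket/path --s3-no-system-metadata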
Nick Craig-Wood
bb3272e837 Add Bachue Zhou to contributors 2022-10-14 11:11:56 +01:00
Nick Craig-Wood
cb5b5635c7 Add Manoj Ghosh to contributors 2022-10-14 11:11:56 +01:00
Bachue Zhou
66ed0ca726 s3: add Qiniu KODO to s3 provider list - fixes #6195 2022-10-13 15:49:22 +01:00
Manoj Ghosh
b16e50851a Add a native backend for oracle object storage - fixes #6299 2022-10-13 13:04:56 +01:00
Nick Craig-Wood
90d23139f6 s3: drop binary metadata with an ERROR message
Before this change, rclone would attempt to upload metadata with
binary contents which fail to be uploaded by net/http.

This checks the keys and values for validity as http header values
before uploading.

See: https://forum.rclone.org/t/invalid-metadata-key-names-result-in-a-failure-to-transfer-xattr-results-in-failure-to-upload-net-http-invalid-header-field-value-for-x-amz-meta-samba-pai/33406/
2022-10-13 12:00:45 +01:00
Nick Craig-Wood
5ea9398b63 swift: add --swift-no-large-objects to reduce HEAD requests
Supplying the flag --swift-no-large-objects is a promise to the swift
backend that there are no dynamic or static large objects stored.

Using that knowledge rclone can speed up its operations, reducing the
number of HEAD requests.

See: https://forum.rclone.org/t/handling-or-not-of-large-objects-in-swift/33389/
See: https://forum.rclone.org/t/swift-sync-checksum-calls-head-on-every-object-so-is-very-slow/30322
2022-10-13 11:58:19 +01:00
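A sketch of usage (container name and path are placeholders), only appropriate when you are sure the container holds no large objects:

    rclone sync swift:container /local/path --swift-no-large-objects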
Isaac Aymerich
3f804224f4 rc: validate Daemon option is not set when mounting a volume via RC - fixes #6469 2022-10-12 12:07:48 +01:00
Nick Craig-Wood
cf0bf159ab s3: try to keep the maximum precision in ModTime with --use-server-modtime
Before this change if --use-server-modtime was in use the ModTime
could change for an object as we receive it accurate to the nearest ms
in listings, but only accurate to the nearest second in HEAD and GET
requests.

Normally AWS returns the milliseconds as .000 in listings, but if
versions are in use it may not. Storj S3 also seems to return
milliseconds.

This patch tries to keep the maximum precision in the last modified
time, so it doesn't update a last modified time with a truncated
version if the times were the same to the nearest second.

See: https://forum.rclone.org/t/cache-fingerprint-miss-behavior-leading-to-false-positive-stalen-cache/33404/
2022-10-12 09:18:10 +01:00
Lesmiscore
6654b66114 union: propagate SlowHash feature 2022-10-10 07:58:01 +01:00
Nick Craig-Wood
9bf78d0373 local: fix "Failed to read metadata: function not implemented" on old Linux kernels
Before this change rclone used statx() to read the metadata for files
from the local filesystem when `-M` was in use.

Unfortunately statx() was only introduced in kernel 4.11 which was
released in April 2017 so there are current systems (eg Centos 7)
still on kernel versions which don't support statx().

This patch checks to see if statx() is available and if it isn't, it
falls back to using fstatat() which was introduced in Linux 2.6.16
which is guaranteed for all Go versions.

See: https://forum.rclone.org/t/metadata-from-linux-local-s3-failed-to-copy-failed-to-read-metadata-from-source-object-function-not-implemented/33233/
2022-10-07 14:14:16 +01:00
Nick Craig-Wood
0c1fb8b2b7 Add YanceyChiew to contributors 2022-10-07 14:14:08 +01:00
YanceyChiew
966654e23a dlna: run assets_generate to make new icons 2022-10-06 16:59:51 +01:00
YanceyChiew
13b65104eb dlna: add SSDP AnnounceInterval flag option
The current default AnnounceInterval is too short, causing the
multicast domain to be flooded with NOTIFY announcements,
which may prevent other dlna devices from sleeping.

This change allows users to set the announcement interval,
and its default value is also increased to 12 minutes.

Even within the interval, rclone can still passively respond to
M-SEARCH requests from other devices.
2022-10-06 16:59:51 +01:00
YanceyChiew
4a35aff33c dlna: add verification of addresses
Verify the http service listening address and the SSDP server
announcement address to prevent accidental listening of IPv6 addresses
that do not support dlna yet and may be globally accessible.

Unlistened addresses on the interface will also be filtered out of the
SSDP announcement to avoid misleading other services in the multicast domain.
2022-10-06 16:59:51 +01:00
YanceyChiew
09b6d939f5 dlna: add support for more external subtitle 2022-10-06 16:59:51 +01:00
Nick Craig-Wood
4e79de106a hubic: remove backend as service has now shutdown - fixes #6481 2022-10-05 13:33:37 +01:00
Nick Craig-Wood
b437d9461a Add Isaac Aymerich to contributors 2022-10-05 13:33:29 +01:00
Nick Craig-Wood
910af597a1 Add Lorenzo Milesi to contributors 2022-10-05 13:33:29 +01:00
Nick Craig-Wood
c10965ecfb Add Dimitri Papadopoulos Orfanos to contributors 2022-10-05 13:33:29 +01:00
albertony
5efb880772 Remove LICENSE 2022-10-04 15:40:37 +02:00
albertony
6c3b7d5820 Create LICENSE 2022-10-04 15:38:58 +02:00
Isaac Aymerich
c5109408c0 rc: handle external unmount when mounting
Before this change, if a mount was created via the rc but unmounted
externally with `fusermount -u` say, rclone would still believe the mount
was active when it wasn't.
2022-10-03 11:24:58 +01:00
Marco Molteni
a3c06b9bbe docs/content: remove duplicate Scaleway C14 Glacier
Scaleway S3/C14 is now called S3/Glacier. Since Glacier is already
mentioned in the Rclone Scaleway section, let's just remove this
entry from here.
2022-10-02 21:58:16 +01:00
Lesmiscore
2aa264b33c smb: backend to support SMB - fixes #2042 2022-09-30 16:10:57 +01:00
albertony
4e078765f9 docs: improve description of make command in install docs 2022-09-28 16:14:12 +02:00
albertony
7fbc928a19 docs: remove "After" in systemd mount example
See #6459
2022-09-26 19:14:10 +02:00
Lorenzo Milesi
27096323db docs: remove "After" in automount example
According to [systemd.automount](https://www.freedesktop.org/software/systemd/man/systemd.automount.html) manual

> Note that automount units are separate from the mount itself, so you should 
> not set After= or Requires= for mount dependencies here. 
> For example, you should not set After=network-online.target or 
> similar on network filesystems. Doing so may result in an ordering cycle.
2022-09-26 19:11:29 +02:00
Dimitri Papadopoulos Orfanos
7e547822d6 build: update GitHub actions to latest versions 2022-09-19 19:51:07 +01:00
Nick Craig-Wood
67625b1dbd ftp: increase timeouts on tests as they were failing locally 2022-09-19 19:45:52 +01:00
Nick Craig-Wood
88086643f7 ftp: adapt to library changes to fix connection errors #6426
In https://github.com/jlaffaye/ftp/commit/212daf295f the upstream FTP
library changed the way adding your own dialer works which meant that
connections when using explicit FTP were failing.

This patch reworks our connection code to bring it into the
expectations of the library.
2022-09-18 11:31:11 +01:00
Nick Craig-Wood
5f13d84135 compress: add extra debugging in case we have a repeat of #6434 2022-09-18 11:31:11 +01:00
Nick Craig-Wood
07efdb55fa compress: fix error handling to not use or return nil objects #6434 2022-09-18 11:31:11 +01:00
Nick Craig-Wood
fb6ddd680c compress: fix crash due to nil metadata #6434
Before this fix, if an error occurred reading the metadata, it could be
set as nil and then used, causing a crash.

This fix changes the readMetadata function so it returns an error, and
the error is always set if the metadata returned is nil.
2022-09-18 11:31:11 +01:00
Nick Craig-Wood
bc09105d2e Add Richard Bateman to contributors 2022-09-18 11:31:11 +01:00
Richard Bateman
4f374bc264 s3: add --s3-sse-customer-key-base64 to supply keys with binary data
Fixes #6400
2022-09-17 17:28:44 +01:00
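A sketch of how the new flag might be used (the key below is only a placeholder, not a real key):

    rclone copy /local/file s3remote:bucket --s3-sse-customer-key-base64 "<base64-encoded-key>"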
Nick Craig-Wood
1c99661d8c onedrive: disable change notify in China region since it is not supported
Fixes #6444
2022-09-16 16:57:29 +01:00
Nick Craig-Wood
04b54bbb1e Add Alexander Knorr to contributors 2022-09-16 16:57:23 +01:00
Nick Craig-Wood
90cda2d6c2 Add Dmitry Deniskin to contributors 2022-09-16 16:57:23 +01:00
Nick Craig-Wood
dbd9ce78e6 Add Øyvind Heddeland Instefjord to contributors 2022-09-16 16:57:23 +01:00
Nick Craig-Wood
cbc18e2693 docs: update install docs to make more consistent
This also adds repology badges where appropriate to show versions in
external repositories.
2022-09-16 16:56:00 +01:00
Alexander Knorr
67c675d7ad docs: add Chocolatey package manager to install instructions 2022-09-15 16:12:39 +01:00
Dmitry Deniskin
c080b39e47 s3: add support for IONOS Cloud Storage 2022-09-15 16:04:34 +01:00
Nick Craig-Wood
8504da496b Changelog updates from Version v1.59.2 2022-09-15 11:57:07 +01:00
Lesmiscore
67240bd541 sftp: fix directory creation races
If mkdir fails then before this change it would have thrown an
error.

After this change, if the error indicated that the directory
already exists then the error is not returned to the user.

This fixes a race condition when two rclone threads are trying to
create the same directory.
2022-09-14 16:45:35 +01:00
albertony
6ce0168ba5 docs: better alignment of icons
Fixes issue with spacing between icon and text in backend docs headers.

This reverts the changes from PR #5889 and #5701, which aligned menu/dropdown items when
icons have different sizes, and implements an alternative fix which gives slightly better
results, and also is more of a native Font Awesome solution:

Font Awesome icons are designed on a grid and share a consistent height. But they vary in
width depending on how wide or narrow each symbol is. If you prefer to work with icons
that have a consistent width, adding fa-fw will render each icon using the same width.
2022-09-14 12:19:57 +01:00
albertony
67f5f04a77 build: fix lint option max-issues-per-linter 2022-09-14 12:11:54 +01:00
Øyvind Heddeland Instefjord
91f8894285 ftp: Add force_list_hidden option
Forces the use of `LIST -a` command
when listing a directory which should
list all hidden folders and files.
2022-09-14 12:10:58 +01:00
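Assuming rclone's usual mapping of backend options to command line flags (so the exact flag spelling here is an assumption), the option can be passed per command or persisted as force_list_hidden = true in the config:

    rclone lsf ftpremote:path --ftp-force-list-hidden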
partev
655d63b4fd docs: fix a typo: aftering -> after 2022-09-14 11:14:32 +01:00
Nick Craig-Wood
d3d843a11d fs: warn the user when using a remote name without a colon
A very common mistake for new users of rclone is to use a remote name
without a colon. This can be on the command line or in the config when
setting up a crypt backend.

This change checks to see if the user uses a path which matches a
remote name and gives a NOTICE like this if they do

    NOTICE: "remote" refers to a local folder, use "remote:" to refer to your remote or "./remote" to hide this warning

See: https://forum.rclone.org/t/sync-to-onedrive-personal-lands-file-in-localfilesystem-but-not-in-onedrive/32956
2022-09-13 18:06:19 +01:00
Nick Craig-Wood
57803bee22 build: update tidy-beta to new layout 2022-09-12 20:32:17 +01:00
Nick Craig-Wood
be53dcc9c9 docs: add more information about --track-renames
See: https://forum.rclone.org/t/feature-question-how-does-rclone-track-renames-and-moves/32911/4
2022-09-12 11:54:35 +01:00
Nick Craig-Wood
bd787e8f45 filter: Fix incorrect filtering with UseFilter context flag and wrapping backends
In this commit

8d1fff9a82 local: obey file filters in listing to fix errors on excluded files

We started using filters in the local backend so the user could short
circuit troublesome files/directories at a low level.

However this caused a number of integration tests to fail. This turned
out to be in backends wrapping the local backend. For example the
combine backend test failed because it changes the paths passed to the
local backend so they no longer match the paths in the current filter.

To fix this, a new feature flag `FilterAware` was added and the
UseFilter context flag is only passed to backends which support it. As
the wrapping backends don't support the flag, this fixes the problems
in the integration tests.

In future the wrapping backends could modify the active filters to
match the path modifications and then they could set the FilterAware
flag.

See #6376
2022-09-05 16:19:50 +01:00
Nick Craig-Wood
3cb7734eac config: move locking to fix fatal error: concurrent map read and map write
Before this change we assumed that github.com/Unknwon/goconfig was
threadsafe as documented.

However it turns out it is not threadsafe and looking at the code it
appears that making it threadsafe might be quite hard.

So this change increases the lock coverage in configfile to cover the
goconfig uses also.

Fixes #6378
2022-09-05 12:11:06 +01:00
Nick Craig-Wood
d08ed7d1e9 ftp: add notes on how to avoid deadlocks with concurrency - fixes #6370 2022-09-05 12:11:06 +01:00
Nick Craig-Wood
f279e4ab01 Add Josh Soref to contributors 2022-09-05 12:10:59 +01:00
albertony
35349657cd docs/sftp: document use of chunk_size option in sftp remote paired with serve sftp
Related to 0008cb4934
2022-08-31 00:04:04 +02:00
Josh Soref
ce3b65e6dc all: fix spelling across the project
* abcdefghijklmnopqrstuvwxyz
* accounting
* additional
* allowed
* almost
* already
* appropriately
* arise
* bandwidth
* behave
* bidirectional
* brackets
* cached
* characters
* cloud
* committing
* concatenating
* configured
* constructs
* current
* cutoff
* deferred
* different
* directory
* disposition
* dropbox
* either way
* error
* excess
* experiments
* explicitly
* externally
* files
* github
* gzipped
* hierarchies
* huffman
* hyphen
* implicitly
* independent
* insensitive
* integrity
* libraries
* literally
* metadata
* mimics
* missing
* modification
* multipart
* multiple
* nightmare
* nonexistent
* number
* obscure
* ourselves
* overridden
* potatoes
* preexisting
* priority
* received
* remote
* replacement
* represents
* reproducibility
* response
* satisfies
* sensitive
* separately
* separator
* specifying
* string
* successful
* synchronization
* syncing
* šenfeld
* take
* temporarily
* testcontents
* that
* the
* themselves
* throttling
* timeout
* transaction
* transferred
* unnecessary
* using
* webbrowser
* which
* with
* workspace

Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com>
2022-08-30 11:16:26 +02:00
albertony
0008cb4934 docs: document that serve sftp uses chunk size 32 KiB
See #6404
2022-08-30 10:57:25 +02:00
albertony
2ea5b4f0b8 Add YFdyh000 to contributors 2022-08-30 10:26:03 +02:00
YFdyh000
b5818454f7 onedrive: cleanup brand name 2022-08-30 10:23:29 +02:00
albertony
555def2da7 build: add package comments to silence revive linter 2022-08-28 13:43:51 +02:00
albertony
02b7613104 docs/jottacloud: improve description of the standard authentication 2022-08-28 10:31:39 +02:00
albertony
b342c6cf9c docs/ftp: improve documentation of anonymous ftp - fixes #5650 2022-08-28 10:22:29 +02:00
albertony
8a6857c295 Add Simon Bos to contributors 2022-08-28 10:19:39 +02:00
albertony
21fd13f10d Add Ryan Morey to contributors 2022-08-28 10:18:33 +02:00
albertony
5cc7797f9e Add anonion to contributors 2022-08-28 10:18:14 +02:00
albertony
8bf2d6b6c8 Add João Henrique Franco to contributors 2022-08-28 10:16:37 +02:00
João Henrique Franco
85eb9776bd crypt: fix typo in comment
strign -> string
2022-08-22 10:43:54 +02:00
anonion
47539ec0e6 docs: fix minor typo in onedrive docs 2022-08-21 22:09:33 +02:00
Ryan Morey
58b327a9f6 docs: fix typo in filter pattern example 2022-08-18 21:14:26 +02:00
Simon Bos
1107da7247 dlna: specify SSDP interface names from command line 2022-08-13 12:06:03 +01:00
Nick Craig-Wood
8d1fff9a82 local: obey file filters in listing to fix errors on excluded files
Fixes #6376
2022-08-11 12:23:06 +01:00
Nick Craig-Wood
2c5923ab1a filter: make sure we check --files-from when looking for a single file 2022-08-11 12:20:17 +01:00
Nick Craig-Wood
1ad22b8881 gcs: add --gcs-endpoint flag and config parameter
See: https://forum.rclone.org/t/how-to-modify-google-cloud-storage-endpoint-uri/32342
2022-08-09 17:33:21 +01:00
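For illustration only (the endpoint URL and remote name are placeholders), either the flag or the endpoint config parameter can be used:

    rclone lsd gcsremote:bucket --gcs-endpoint https://storage.example.com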
Nick Craig-Wood
0501773db1 azureblob,b2,s3: fix chunksize calculations producing too many parts
Before this fix, the chunksize calculator was using the previous size
of the object, not the new size of the object to calculate the chunk
sizes.

This meant that uploading a replacement object which needed a new
chunk size would fail, using too many parts.

This fix fixes the calculator to take the size explicitly.
2022-08-09 12:57:38 +01:00
Nick Craig-Wood
cb8842941b Add Mark Trolley to contributors 2022-08-09 12:57:38 +01:00
Mark Trolley
5439a2c5c6 docs: fix script installation command on downloads page
Script installation instructions in `downloads.md` differ from those in
`install.md` and fail on MacOS.
2022-08-09 11:58:04 +01:00
Nick Craig-Wood
d347ac0154 local: disable xattr support if the filesystems indicates it is not supported
Before this change, if rclone was run with `-M` on a filesystem
without xattr support, it would error out.

This patch makes rclone detect the not supported errors and disable
xattrs from then on. It prints one ERROR level message about this.

See: https://forum.rclone.org/t/metadata-update-local-s3/32277/7
2022-08-09 09:27:56 +01:00
Nick Craig-Wood
9f33eb2e65 Changelog updates from Version 1.59.1 2022-08-08 19:01:11 +01:00
Nick Craig-Wood
fe801b8fef Add Joram Schrijver to contributors 2022-08-08 19:01:11 +01:00
albertony
6b158f33a3 serve sftp: document legacy code for checksum detection
See #6351
2022-08-06 20:46:38 +02:00
Joram Schrijver
5a6d233924 dlna: fix SOAP action header parsing - fixes #6354
Changes in github.com/anacrolix/dms changed upnp.ServiceURN to include a
namespace identifier. This identifier was previously hardcoded, but is
now parsed out of the URN. The old SOAP action header parsing logic was
duplicated in rclone and did not handle this field. Resulting responses
included a URN with an empty namespace identifier, breaking clients.
2022-08-06 17:23:37 +01:00
Nick Craig-Wood
df513ca90a build: update dependencies 2022-08-05 17:43:53 +01:00
Nick Craig-Wood
49bb640bae accounting: fix panic in core/stats-reset with unknown group - fixes #6327
This also adds tests for the rc commands for stats groups
2022-08-05 17:30:55 +01:00
Nick Craig-Wood
98fd00a655 serve sftp: fix checksum detection - Fixes #6351
Before this change, rclone serve sftp operating with a new rclone
after the md5sum/sha1sum detection was reworked to just run a plain
`md5sum`/`sha1sum` command in

3ea82032e7 sftp: support md5/sha1 with rsync.net #3254

Failed to signal to the remote that md5sum/sha1sum wasn't supported as
in

71e172a139 serve/sftp: support empty "md5sum" and "sha1sum" commands

We unconditionally return good hashes even if the remote being served
doesn't support the hash type in question.

This fix checks the hash type is supported and returns an error

    MD5 hash not supported

When the backend is first contacted this will cause the sftp backend
to detect that the hash type isn't available.

Unfortunately this may have cached the wrong state so editing or
remaking the config may be necessary to fix it.
2022-08-05 17:16:23 +01:00
Nick Craig-Wood
16039b350d fs: fix parsing of times and durations of the form "YYYY-MM-DD HH:MM:SS"
Before this fix, the parsing code gave an error like this

    parsing "2022-08-02 07:00:00" as fs.Time failed: expected newline

This was due to the Scan call failing to read all the data.

This patch fixes that, and redoes the tests
2022-08-05 16:45:23 +01:00
Nick Craig-Wood
ebe86c6cec s3: add --s3-decompress flag to download gzip-encoded files
Before this change, if an object compressed with "Content-Encoding:
gzip" was downloaded, a length and hash mismatch would occur since the
go runtime automatically decompressed the object on download.

If --s3-decompress is set, this change erases the length and hash on
compressed objects so they can be downloaded successfully, at the cost
of not being able to check the length or the hash of the downloaded
object.

If --s3-decompress is not set the compressed files will be downloaded
as-is providing compressed objects with intact size and hash
information.

See #2658
2022-08-05 16:45:23 +01:00
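An illustrative command (remote, object and path are placeholders) downloading a gzip-encoded object decompressed:

    rclone copy s3remote:bucket/file.gz /local/path --s3-decompress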
Nick Craig-Wood
1f5e7ce598 lib/readers: add GzipReader 2022-08-05 16:45:23 +01:00
Nick Craig-Wood
4b981100db s3: refactor to use generated code instead of reflection to copy structs 2022-08-05 16:45:23 +01:00
Nick Craig-Wood
4344a3e2ea s3: implement --s3-version-at flag - Fixes #1776 2022-08-05 16:45:23 +01:00
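As a hedged example (remote, bucket and timestamp are placeholders), listing a bucket as it was at a point in time might look like:

    rclone ls s3remote:bucket --s3-version-at "2022-08-01 00:00:00"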
Nick Craig-Wood
1542a979f9 s3: refactor f.list() to take an options struct as it had too many parameters 2022-08-05 16:45:23 +01:00
Nick Craig-Wood
81d242473a s3: implement Purge to purge versions and backend cleanup-hidden 2022-08-05 16:45:23 +01:00
Nick Craig-Wood
0ae171416f s3: implement --s3-versions flag - See #1776 2022-08-05 16:45:23 +01:00
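For illustration (remote and bucket are placeholders), including old versions in a listing:

    rclone ls s3remote:bucket --s3-versions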
Nick Craig-Wood
a59fa2977d s3: factor different listing versions into separate objects 2022-08-05 16:42:30 +01:00
Nick Craig-Wood
7243918069 s3: implement backend versioning command to get/set bucket versioning 2022-08-05 16:42:30 +01:00
Nick Craig-Wood
fa49971d49 docs: move time/duration option docs to the main docs 2022-08-05 16:42:30 +01:00
Nick Craig-Wood
77e3512714 fstests: Make InternalTestFiles so the internal tests know the current state 2022-08-05 16:42:30 +01:00
Nick Craig-Wood
337b43e7e4 fstests: make ReadObject publically accessible 2022-08-05 16:42:30 +01:00
Nick Craig-Wood
6fd9e3d717 build: reformat comments to pass go1.19 vet
See: https://go.dev/doc/go1.19#go-doc
2022-08-05 16:35:41 +01:00
Nick Craig-Wood
876f791ecd Revert "build: lint with go1.18 until golangci-lint is updated"
This reverts commit 2170376d1b.
2022-08-05 16:04:54 +01:00
Nick Craig-Wood
918bd6d3c3 dropbox: fix ChangeNotify was unable to decrypt errors
Before this fix, the dropbox backend wasn't decoding the file names
received in changenotify events into rclone standard format.

This meant that changenotify events for filenames which had encoded
characters were failing to be decrypted properly if wrapped in crypt.

See: https://forum.rclone.org/t/rclone-vfs-cache-says-file-name-too-long/31535
2022-08-04 10:26:25 +01:00
Nick Craig-Wood
f49be033c6 mega: Fix nil pointer exception when bad node received
Fixes: #6336
2022-08-04 10:22:57 +01:00
Nick Craig-Wood
2a817e21cb vfs: fix excess CPU used by VFS cache cleaner looping
Before this change the VFS cache cleaner would loop indefinitely while
the cache was above quota. This used up all the CPU.

This fix prevents the cache cleaner from looping. It will be kicked on
ENOSPACE and run in its scheduled time otherwise so this should be
sufficient.

See: https://forum.rclone.org/t/vfs-keeps-checking-same-files/32120
2022-08-04 10:19:47 +01:00
Nick Craig-Wood
a07d376fb1 vfs: reduce memory usage by re-ordering commonly used structures 2022-08-04 10:19:47 +01:00
Nick Craig-Wood
e749bc58f4 vfs: reduce memory use by embedding sync.Cond 2022-08-04 10:19:47 +01:00
Nick Craig-Wood
821e084f28 combine: fix errors with backends shutting down while in use
Before this patch backends could be shutdown when they fell out of the
cache when they were in use with combine. This was particularly
noticeable with the dropbox backend which gave this error when
uploading files after the backend was Shutdown.

    Failed to copy: upload failed: batcher is shutting down

This patch gets the combine remote to pin them until it is finished.

See: https://forum.rclone.org/t/rclone-combine-upload-failed-batcher-is-shutting-down/32168
2022-08-04 10:13:41 +01:00
Nick Craig-Wood
2170376d1b build: lint with go1.18 until golangci-lint is updated
See: https://github.com/golangci/golangci-lint/pull/3037
2022-08-03 18:04:58 +01:00
Nick Craig-Wood
8125b1cf08 build: update to v3 of golangci-lint action 2022-08-03 18:04:58 +01:00
Nick Craig-Wood
ba60984f33 build: update to go1.19 and make go1.17 the minimum required version 2022-08-03 18:04:58 +01:00
Nick Craig-Wood
a875320e37 sync,operations: optimise --copy-dest and --compare-dest
Before this change --compare-dest and --copy-dest would check to see
if the compare/copy object existed first, before seeing if the
destination object was present.

This is inefficient, because in most --copy-dest syncs the destination
will be present and the compare/copy object need never be tested.
--compare-dest syncs may also be speeded up if they are done to the
same directory repeatedly.

This fixes the problem by re-arranging the logic so if the transfer is
not needed then the compare/copy object is never tested.

See: https://forum.rclone.org/t/union-with-copy-dest-enabled-is-slower-than-expected/32172
2022-08-03 17:44:47 +01:00
Nick Craig-Wood
639624184d build: fix android build after GitHub actions change
Before this change the android build started failing with

    gomobile: ANDROID_NDK_HOME specifies /usr/local/lib/android/sdk/ndk/25.0.8775105
    which is unusable: unsupported API version 16 (not in 19..33)

This was caused by a change to github actions, but is ultimately due
to an issue in gomobile with the newest version of the SDK.

This change fixes the problem by declaring a minimum API version of 21
and using version 21 compilers to build everything and using the
default NDK in github actions.

See: https://github.com/actions/virtual-environments/issues/5930
See: https://github.com/lightningnetwork/lnd/issues/6651
2022-08-03 17:22:35 +01:00
Nick Craig-Wood
fe84cca1ad Revert "build: disable revive linter pending a fix in golangci-lint"
This reverts commit 7a24c173f6.
2022-08-03 13:14:51 +01:00
Nick Craig-Wood
9d3958bd0b build: fix formatting after golangci-lint update 2022-08-03 10:11:16 +01:00
Nick Craig-Wood
3a8e52de74 dropbox: fix infinite loop on uploading a corrupted file
Before this change, if rclone attempted to upload a file which read
more bytes than the size it declared then the uploader would enter an
infinite loop.

See: https://forum.rclone.org/t/transfer-percentages-100-again/32109
2022-07-29 17:40:05 +01:00
albertony
72227a0151 jottacloud: do not store username in config when using standard auth
Previously, with standard auth, the username would be stored in config - but only after
entering the non-standard device/mountpoint sequence during config (a feature introduced
with #5926). Regardless of that, rclone always requests the username from the api at
startup (NewFS).

In #6270 (commit 9dbed02329) this was changed to always
store username in config (consistency), and then also use it to avoid the repeated
customer info request in NewFs (performance). But, as reported in #6309, it did not work
with legacy auth, where the user enters the username manually, if the user entered an email address
instead of the internal username required for api requests. This change was therefore
recently reverted.

The current commit takes another step back to not store the username in config during
the non-standard device/mountpoint config sequence (consistency). The username will
now only be stored in config when using legacy auth, where it is an input parameter.
2022-07-25 18:23:09 +01:00
Nick Craig-Wood
9f40cb114a Revert "jottacloud: always store username in config and use it to avoid initial api request"
This reverts commit 9dbed02329.

See: #6309
2022-07-25 18:23:09 +01:00
Lesmiscore
2f461f13e3 internetarchive: handle hash symbol in the middle of filename 2022-07-22 13:08:42 +01:00
Nick Craig-Wood
7a24c173f6 build: disable revive linter pending a fix in golangci-lint
The revive linter got extremely slow in golangci-lint 1.47.1 causing
the CI to time out.

Disable for the time being until it is fixed.

See: https://github.com/golangci/golangci-lint/issues/2997
2022-07-20 23:07:20 +01:00
Nick Craig-Wood
fb60aeddae Add Jordi Gonzalez Muñoz to contributors 2022-07-20 23:07:02 +01:00
Nick Craig-Wood
695736d1e4 Add Steve Kowalik to contributors 2022-07-20 23:07:02 +01:00
albertony
f0396070eb sftp: fix issue with WS_FTP by working around failing RealPath 2022-07-20 18:07:50 +01:00
Jordi Gonzalez Muñoz
f1166757ba librclone: add PHP bindings and test program 2022-07-20 17:20:12 +01:00
Steve Kowalik
9b76434ad5 drive: make --drive-stop-on-upload-limit obey quota exceeded error
Extend the shouldRetry function by also checking for the quotaExceeded
reason, and since this function appeared to be untested, add a test case
for the existing errors and this new one.

Fixes #615
2022-07-20 10:37:34 +01:00
Nick Craig-Wood
440d0cd179 s3: fix --s3-no-head panic: reflect: Elem of invalid type s3.PutObjectInput
In

22abd785eb s3: implement reading and writing of metadata #111

The reading information of objects was refactored to use the
s3.HeadObjectOutput structure.

Unfortunately the code branch with `--s3-no-head` was not tested
otherwise this panic would have been discovered.

This shows that this path is not integration tested, so this adds a
new integration test.

Fixes #6322
2022-07-18 23:38:50 +01:00
Nick Craig-Wood
a047d30eca Add Yen Hu to contributors 2022-07-18 23:38:50 +01:00
Yen Hu
03d0f331f7 onedrive: rename Onedrive(cn) 21Vianet to Vnet Group
The old site had shown a redirect page to the new one since 2021-4-21.
https://www.21vianet.com
The official site has also been renamed to Vnet Group.
https://www.vnet.com/en/about
2022-07-17 17:07:23 +01:00
Lesmiscore
049674aeab backend/internetarchive: ignore checksums for files using the different method 2022-07-17 14:02:40 +01:00
Nick Craig-Wood
50f053cada dropbox: fix hang on quit with --dropbox-batch-mode off
This problem was created by the fact that we are much more diligent
about calling Shutdown now, and the dropbox backend had a hang if the
batch mode was "off" in the Shutdown method.

See: https://forum.rclone.org/t/dropbox-lsjson-in-1-59-stuck-on-commiting-upload/31853
2022-07-17 12:51:44 +01:00
Nick Craig-Wood
140af43c26 build: add 32 bit test runner to avoid problems like #6311 2022-07-14 20:13:03 +01:00
Nick Craig-Wood
f467188876 Add Evan Spensley to contributors 2022-07-14 20:13:03 +01:00
Evan Spensley
4a4379b312 jobs: add ability to stop group
Adds new rc call to stop all running jobs in a group. Fixes #5561
2022-07-13 18:13:31 +01:00
Nick Naumann
8c02fe7b89 sync: update docs and error messages to reflect fixes to overlap checks 2022-07-13 16:04:53 +01:00
Nick Naumann
11be920e90 sync: add filter-sensitivity to --backup-dir option
The old Overlapping function and corresponding tests have been removed, as it has been completely replaced by the OverlappingFilterCheck function.
2022-07-13 16:04:53 +01:00
albertony
8c19b355a5 docs: fix links to mount command from install docs 2022-07-13 12:33:54 +02:00
r-ricci
67fd60275a union: fix panic due to misalignment of struct field in 32 bit architectures
`FS.cacheExpiry` is accessed through sync/atomic.
According to the documentation, "On ARM, 386, and 32-bit MIPS, it is
the caller's responsibility to arrange for 64-bit alignment of 64-bit
words accessed atomically. The first word in a variable or in an
allocated struct, array, or slice can be relied upon to be 64-bit
aligned."
Before commit 1d2fe0d856 this field was
aligned, but then a new field was added to the structure, causing the
test suite to panic on linux/386.
No other field is used with sync/atomic, so `cacheExpiry` can just be
placed at the beginning of the struct to ensure it is always aligned.
2022-07-11 18:34:06 +01:00
Nick Craig-Wood
b310490fa5 union: fix multiple files being uploaded when roots don't exist
See: https://forum.rclone.org/t/union-backend-copying-to-all-remotes-while-it-shouldnt/31781
2022-07-11 18:19:36 +01:00
Nick Craig-Wood
0ee0812a2b union: fix duplicated files when using directories with leading /
See: https://forum.rclone.org/t/union-backend-copying-to-all-remotes-while-it-shouldnt/31781
2022-07-11 18:19:36 +01:00
Nick Craig-Wood
55bbff6346 operations: add --server-side-across-configs global flag for any backend 2022-07-11 18:17:42 +01:00
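A minimal sketch (remote names and paths are placeholders) of requesting server side operations between two differently configured remotes of the same type:

    rclone copy remote1:path remote2:path --server-side-across-configs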
Nick Craig-Wood
9c6cfc1ff0 combine: throw error if duplicate directory name is specified
See: https://forum.rclone.org/t/v1-59-combine-qs/31814
2022-07-10 15:40:30 +01:00
Nick Craig-Wood
f753d7cd42 combine: fix docs showing remote= instead of upstreams=
See: https://forum.rclone.org/t/v1-59-combine-qs/31814
2022-07-10 15:34:48 +01:00
Nick Craig-Wood
f5be1d6b65 Start v1.60.0-DEV development 2022-07-09 20:43:17 +01:00
Nick Craig-Wood
00a684d877 Version v1.59.0 2022-07-09 18:09:25 +01:00
Nick Craig-Wood
1c4ee2feee gcs: add --gcs-decompress flag to download gzip-encoded files
By default these will be downloaded compressed.

This changes the default of the previous commit

2781f8e2f1 gcs: Fix download of "Content-Encoding: gzip" compressed objects

But will fit in better with the metadata framework when copying
gzip-encoded objects from backend to backend.
2022-07-09 17:31:12 +01:00
Nick Craig-Wood
876f12f2c4 Add Ovidiu Victor Tatar to contributors 2022-07-09 12:35:59 +01:00
Nick Craig-Wood
6e9c1eebd9 Add Claudio Maradonna to contributors 2022-07-09 12:35:59 +01:00
Lesmiscore (Naoya Ozaki)
42dfadfa1b internetarchive: add support for Metadata 2022-07-08 23:47:50 +01:00
Ovidiu Victor Tatar
b4d847cadd new backend: hidrive - fixes #1069 2022-07-08 18:24:54 +01:00
Ovidiu Victor Tatar
502226bfc8 pacer: add ZeroDelayCalculator 2022-07-08 18:24:54 +01:00
Ovidiu Victor Tatar
53400d7edc oauthlib: add method to set a token as expired
This can be used by backends to trigger a refresh of an access token if
they detect an invalid token.
2022-07-08 18:24:54 +01:00
Claudio Maradonna
62bcc84f6f vfs: add --vfs-disk-space-total-size option to manually set the total disk space
Now you can specify --vfs-disk-space-total-size to set the total disk
space (default to -1)

fixes #3270
2022-07-08 17:26:54 +01:00
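An illustrative mount command (remote and mount point are placeholders) reporting a fixed total disk size of 100G:

    rclone mount remote: /mnt/remote --vfs-disk-space-total-size 100G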
Nick Craig-Wood
2e54b56a01 rcat: check checksums by default like copy does #6305
Before this change we were calculating the checksum for an rcat
transfer but never checking it.

See: https://forum.rclone.org/t/optimize-rclone-on-raspberry-pi-4-8gb/31741
2022-07-07 16:09:09 +01:00
Nick Craig-Wood
2515039e18 Add Lorenzo Maiorfi to contributors 2022-07-07 16:09:09 +01:00
Nick Craig-Wood
a9c531b9eb Add Paul Norman to contributors 2022-07-07 16:09:09 +01:00
Nick Craig-Wood
0db50ecb2f Add zzr93 to contributors 2022-07-07 16:09:09 +01:00
Nick Craig-Wood
388da82762 Add Anthrazz to contributors 2022-07-07 16:09:09 +01:00
Lorenzo Maiorfi
b5efffee9d azureblob: allow remote emulator (azurite) - fixes #6290 2022-07-06 11:54:04 +01:00
Paul Norman
3ec07d5db9 docs: fix typo in license webpage 2022-07-06 11:25:31 +01:00
albertony
5c6a958ad8 go mod tidy: github.com/pkg/xattr should be direct 2022-07-04 11:24:59 +02:00
albertony
ad8c94e982 staticcheck: redundant return statement 2022-07-04 11:24:59 +02:00
albertony
e5bf6a813c staticcheck: google api New is deprecated: please use NewService instead 2022-07-04 11:24:59 +02:00
albertony
f18095b004 staticcheck: ignore deprecations that are not relevant 2022-07-04 11:24:59 +02:00
albertony
c70e890966 staticcheck: TLS config NameToCertificate is deprecated, should instead let library select the first compatible chain from Certificates 2022-07-04 11:24:59 +02:00
albertony
986bb17656 staticcheck: awserr.BatchError is deprecated: Replaced with BatchedErrors 2022-07-04 11:24:59 +02:00
albertony
92a43c5f7b staticcheck: should use a simple channel send/receive instead of select with a single case 2022-07-04 11:24:59 +02:00
albertony
9612ca6110 staticcheck: ignore unused if platform dependent 2022-07-04 11:24:59 +02:00
albertony
1f9560e873 selfupdate: replace deprecated x/crypto/openpgp package with ProtonMail/go-crypto 2022-07-04 11:24:59 +02:00
albertony
c9d67c86fb staticcheck: ignore suggestion to use context.TODO instead of nil when testing nil Context 2022-07-04 11:24:59 +02:00
albertony
3e9c5eca3b yandex: handle api error on server-side move 2022-07-04 11:24:59 +02:00
albertony
a1fd60ec2b staticcheck: empty branch 2022-07-04 11:24:59 +02:00
albertony
7b8c974dec staticcheck: ineffective break statement 2022-07-04 11:24:59 +02:00
albertony
5b579cea47 staticcheck: use golang.org/x/text/cases instead of deprecated strings.Title
strings.Title has been deprecated since Go 1.18 and an alternative has been
available since Go 1.0. The rule Title uses for word boundaries does not handle
Unicode punctuation properly. Use golang.org/x/text/cases instead.
2022-07-04 11:24:59 +02:00
albertony
7822df565e staticcheck: unused func 2022-07-04 11:24:59 +02:00
albertony
3435bf7f34 staticcheck: no value of type int64 is greater than math.MaxInt64 2022-07-04 11:24:59 +02:00
albertony
0772cae314 staticcheck: use result of type assertion to simplify cases 2022-07-04 11:24:59 +02:00
zzr93
060c8dfff0 operations: use correct src/dst in some log messages
Most of the time this will make no difference to user logs, however
the difference may be visible in JSON logs and on the rare occasions
src and dst are pointing to different file names.
2022-07-04 10:18:04 +01:00
Anthrazz
424a1f39eb sftp: add Hetzner Storage Boxes to supported sftp backends 2022-07-04 10:08:33 +01:00
Nick Craig-Wood
06182a3443 s3: actually compress the payload for content-type gzip test 2022-07-04 09:42:49 +01:00
Nick Craig-Wood
a58b482061 fstests: fix Metadata tests on remotes with additional config 2022-07-04 09:42:49 +01:00
Nick Craig-Wood
accf91742c fs: add Type and FindFromFs to manage Fs and RegInfo 2022-07-04 09:42:49 +01:00
albertony
9dbed02329 jottacloud: always store username in config and use it to avoid initial api request
Existing version did save username in config, but only when entering the custom
device/mountpoint sequence in config. Regardless of that, it did always look up the
username at startup with an api request.

This commit improves it so that the username will always be stored in config,
and when using standard authentication it picks it from the login token instead of
requesting it from the remote api, and also in fs constructor it picks it from config
instead of requesting it from remote api (again).
2022-07-03 12:56:25 +02:00
albertony
73e3bb09d7 http: fix missing response when using custom auth handler 2022-07-02 15:29:50 +02:00
Nick Craig-Wood
7e7a8a95e9 hasher: support metadata 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
ed87ae51c0 union: support metadata 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
bf4a16ae30 crypt: support metadata 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
c198700812 compress: support metadata 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
8c483daf85 combine: support metadata 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
ba5760ff38 chunker: mark as not supporting metadata 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
cd1735bb10 cache: mark as not supporting metadata 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
866c873daa backend: allow wrapping backend tests to run in make quicktest 2022-06-29 17:30:37 +01:00
Nick Craig-Wood
c556e98f49 local: add Metadata support #111 2022-06-29 14:29:36 +01:00
Nick Craig-Wood
22abd785eb s3: implement reading and writing of metadata #111 2022-06-29 14:29:36 +01:00
Nick Craig-Wood
a692bd2cd4 s3: change metadata storage to normal map with lowercase keys 2022-06-29 14:29:36 +01:00
Nick Craig-Wood
776a083892 lsf: add metadata support with M flag 2022-06-29 14:29:36 +01:00
Nick Craig-Wood
d823a38ce5 lsjson: add --metadata/-M flag
Note that this removes the `-M` flag from `--encrypted` as it
conflicted with the global flag and adds it to `--metadata`.
2022-06-29 14:29:36 +01:00
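For illustration (remote and path are placeholders), metadata is included in the JSON output with:

    rclone lsjson -M remote:path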
Nick Craig-Wood
78d52882ca fs: add --metadata/-M flag to control whether metadata is copied 2022-06-29 14:29:36 +01:00
Nick Craig-Wood
c4451bc43a fs: add --metadata-set flag to specify metadata for uploads 2022-06-29 14:29:36 +01:00
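A sketch of usage (key, value and paths are placeholders); the flag may be repeated to set several items and is shown here together with -M for metadata copying:

    rclone copy /local/file remote:path -M --metadata-set mykey=myvalue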
Nick Craig-Wood
0652ec95db fs: implement MetadataInfo to show info about metadata in help and rc
Info about this will appear in operations/fsinfo and in the backend
help (`rclone help backend s3`).
2022-06-29 14:29:36 +01:00
Nick Craig-Wood
6a0e021dac fs: implement optional Metadata interface for Objects #111
This implements integration tests for the feature also.
2022-06-29 11:21:29 +01:00
Nick Craig-Wood
461d041c4d fstest: remove spurious contents return from PutTestContents and friends 2022-06-29 11:18:02 +01:00
Nick Craig-Wood
35f24d5b84 Add vyloy to contributors 2022-06-29 10:55:03 +01:00
Nick Craig-Wood
370c8fa220 Add mirekphd to contributors 2022-06-29 10:55:03 +01:00
Nick Craig-Wood
0fca4d2c86 Add buda to contributors 2022-06-29 10:55:03 +01:00
Martin Czygan
5de9278650 fs/cache: make sure we call the Shutdown method on backends
This change ensures we call the Shutdown method on backends when
they drop out of the fs/cache and at program exit.

Some backends implement the optional fs.Shutdowner interface. Until now,
Shutdown is only checked and called, when a backend is wrapped (e.g.
crypt, compress, ...).

To have a general way to perform operations at the end of the backend
lifecycle with proper error handling, we can call Shutdown at cache
clear time.

We add a finalize hook to the cache which will be called when values
drop out of the cache.

Previous discussion: https://forum.rclone.org/t/31336
2022-06-28 12:51:59 +01:00
vyloy
326c43ab3f s3: add IDrive e2 to provider list 2022-06-28 09:12:36 +01:00
mirekphd
32006033e6 docs: note wider impact of --checkers=N on parallelism #6280 2022-06-27 19:33:07 +01:00
buda
517e7d9271 accounting: fix unknown length file transfers count 3 transfers each #6213
This was caused by nested calls to NewTransfer/Done.

This fixes the problem by only incrementing transfers if the remote is
present in the transferMap which means we only increment it once.
2022-06-27 17:56:03 +01:00
albertony
fdd2f8e6d2 Error strings should not be capitalized
Reported by staticcheck 2022.1.2 (v0.3.2)

See: staticcheck.io
2022-06-23 23:26:02 +02:00
Abhiraj
027746ef6e drive: moved rclone_folder_id to advanced section - fixes #3463 2022-06-23 22:08:09 +02:00
albertony
53f831f40a docs: add missing code section formatting to commands and flags 2022-06-21 23:43:00 +02:00
albertony
9f81b4df4f docs/lsjson: fix code block indentation 2022-06-21 23:43:00 +02:00
albertony
bf54c909c9 docs: improved capitalization 2022-06-21 23:43:00 +02:00
albertony
dbf1234edf docs: skip "Connection" suffix from FTP, SSH/SFTP and HTTP backend names 2022-06-21 23:43:00 +02:00
albertony
70d9d75801 docs: improve serve command descriptions 2022-06-21 23:43:00 +02:00
albertony
bc70a95fca docs: consistent capitalization of WebDAV, DLNA, HTTP 2022-06-21 23:43:00 +02:00
albertony
ee87e919c5 docs/ncdu: note that refresh screen shortcut will fix screen corruption 2022-06-21 23:43:00 +02:00
albertony
4f0eae366f docs/ncdu: fix inconsistency in key map help text
Ctrl+L was listed as ^L, while Ctrl+c was listed as c-C. Changed the latter to ^c.
2022-06-21 23:43:00 +02:00
albertony
de5ccaab8e docs: cross link doc pages for related commands 2022-06-21 23:43:00 +02:00
albertony
4b7dc35cf4 Fix sync docs incorrect merge
This copies the changes from an autogenerated section in the following commit:
f2a15a174f
2022-06-21 23:43:00 +02:00
Nick Craig-Wood
bc705e14d8 vfscache: fix fatal error: sync: unlock of unlocked mutex error
This message is a double panic and was actually caused by an assertion
panic in:

vfs/vfscache/downloaders/downloaders.go

This is triggered by the code added relatively recently to fix a bug
with renaming files:

ec72432cec vfs: fix failed to _ensure cache internal error: downloaders is nil error

So it appears that item.o may be nil at this point.

This patch detects item.o being nil and fetches it again with NewObject.

Fixes #6190 Fixes #6235
2022-06-21 14:28:53 +01:00
Nick Craig-Wood
ea5bb79366 drive: document export for google apps scripts better
This also adds some more mime types from the code.

See: https://forum.rclone.org/t/how-do-i-copy-google-apps-scripts-with-rclone/31373
2022-06-21 14:28:53 +01:00
Nick Craig-Wood
e95dff2fa1 drive: add backend commands exportformats and importformats for debugging 2022-06-21 14:28:53 +01:00
Nick Craig-Wood
99dfe1eeae Add Martin Czygan to contributors 2022-06-21 14:28:53 +01:00
Nick Craig-Wood
ed92bf335d Add J-P Treen to contributors 2022-06-21 14:28:53 +01:00
Nick Craig-Wood
3d55b537c6 Add Caleb to contributors 2022-06-21 14:28:53 +01:00
Nick Craig-Wood
d03fffdf8d Add eNV25 to contributors 2022-06-21 14:28:53 +01:00
Martin Czygan
7a909ebfb0 fs/cache: fix cache unpin 2022-06-20 12:14:58 +01:00
buengese
ac0dc9922e copyurl: add tests for the option to honor the HTTP header filename directive 2022-06-20 12:06:09 +01:00
J-P Treen
8b8802a078 copyurl: Add option to honor the HTTP header filename directive.
Implemented --header-filename for use with copyurl.

For specifically setting preferred download filenames for HTTP requests, RFC 6266
specifies a 'filename' directive, available within the 'Content-Disposition'
header. We can handle this with 'mime.ParseMediaType'.

See below for details:
https://httpwg.org/specs/rfc6266.html#disposition.parameter.filename
https://httpwg.org/specs/rfc6266.html#advice.generating

Co-authored-by: buengese <buengese@protonmail.com>
2022-06-20 12:06:09 +01:00
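For reference, a tiny standalone example of pulling the filename directive out of a Content-Disposition header with the standard library's mime.ParseMediaType; the header value here is made up:

    package main

    import (
        "fmt"
        "mime"
    )

    func main() {
        cd := `attachment; filename="report-2022.pdf"`
        _, params, err := mime.ParseMediaType(cd)
        if err != nil {
            panic(err)
        }
        fmt.Println(params["filename"]) // report-2022.pdf
    }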
Caleb
f2a15a174f docs: grammatical clarification in sync docs 2022-06-19 15:26:53 +02:00
buengese
21c746a56c fichier: parse API error codes and handle them accordingly 2022-06-19 15:07:33 +02:00
eNV25
36add0afbf ncdu: replace termbox with tcell's termbox wrapper
The https://github.com/nsf/termbox-go library is no longer maintained
so this change replaces it with the maintained
github.com/gdamore/tcell library which has a termbox backwards
compatibility layer.

There are a few minor changes from the termbox library:

- Using Clear with fg bg ColorDefault resulted in a white background for some reason.
    - Clear with fg ColorWhite bg ColorBlack was used instead.
- tcell's termbox wrapper doesn't support ColorLightYellow.
    - ColorYellow + 8 was used instead.
2022-06-19 11:22:45 +01:00
Nick Craig-Wood
14e0396fcb test_all: allow internet archive backend more time 2022-06-18 15:13:18 +01:00
Nick Craig-Wood
100acc570a test_all: fix -clean so it works on remotes with paths 2022-06-18 15:10:09 +01:00
Nick Craig-Wood
b9de37af80 test_all: Only run backend tests for Internet Archive as it is too slow 2022-06-17 16:52:30 +01:00
Nick Craig-Wood
f7c36ce0f9 s3: unwrap SDK errors to reveal underlying errors on upload
The SDK doesn't wrap errors in a Go standard way so they can't be
unwrapped and tested for - eg fatal error.

The code looks for a Serialization or RequestError and returns the
unwrapped underlying error if possible.

This fixes the fs/operations integration tests checking for fatal
errors being returned.
2022-06-17 16:52:30 +01:00
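A hedged sketch of the unwrapping idea; sdkError below stands in for an awserr.Error style wrapper and is not the SDK's real type:

    package main

    import (
        "errors"
        "fmt"
    )

    // origErrer matches wrappers that expose the underlying error via an
    // OrigErr method rather than the standard Unwrap.
    type origErrer interface{ OrigErr() error }

    func unwrapSDKError(err error) error {
        var oe origErrer
        if errors.As(err, &oe) && oe.OrigErr() != nil {
            return oe.OrigErr()
        }
        return err
    }

    // sdkError is an illustrative stand-in for an SDK error wrapper.
    type sdkError struct{ inner error }

    func (e sdkError) Error() string  { return "RequestError: " + e.inner.Error() }
    func (e sdkError) OrigErr() error { return e.inner }

    func main() {
        fatal := errors.New("fatal error")
        fmt.Println(errors.Is(unwrapSDKError(sdkError{fatal}), fatal)) // true
    }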
Nick Craig-Wood
f829ded456 Add Phil Shackleton to contributors 2022-06-17 16:52:30 +01:00
Nick Craig-Wood
2fac8fdde6 Add Scott Grimes to contributors 2022-06-17 16:52:30 +01:00
Phil Shackleton
8e2d9a4cb9 drive: update Internal OAuth consent screen docs
Updated instructions in the `Making your own client_id` section to describe the process for selecting the "Internal" OAuth consent screen.
2022-06-17 16:50:01 +01:00
Scott Grimes
295006f662 opendrive: resolve lag and truncate bugs fixes #5936
Co-authored-by: buengese <buengese@protonmail.com>
2022-06-17 16:48:03 +01:00
Lu Wang
dcc128c70d docs/onedrive: document creation of client ID for OneDrive Business 2022-06-17 07:33:56 +02:00
Nick Craig-Wood
c85fbebce6 s3: simplify PutObject code to use the Request.SetStreamingBody method
In this commit

e5974ac4b0 s3: use PutObject from the aws SDK to upload single part objects

rclone was made to upload objects to s3 using PUT requests rather than
using signed uploads.

However this change missed the fact that there is a supported way to
do this in the SDK using the SetStreamingBody method on the Request.

This therefore reverts a lot of the previous commit to do with making
an unsigned connection and other complication and uses the SDK
facility.
2022-06-16 23:26:19 +01:00
Nick Craig-Wood
e59801c69b Add Maciej Radzikowski to contributors 2022-06-16 23:26:19 +01:00
Nick Craig-Wood
5697dbc80f local: fix parsing of --local-nounc flag 2022-06-16 22:13:50 +01:00
Nick Craig-Wood
7d3648dc46 serve ftp: check --passive-port arguments are correct
See: https://forum.rclone.org/t/serve-ftp-passive-port-validity-check/27458
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
a6ca4b3817 test info: check file name lengths using 1,2,3,4 byte unicode characters 2022-06-16 22:13:50 +01:00
Nick Craig-Wood
e57fe14b61 mount: log IO errors at ERROR level - fixes #6217 2022-06-16 22:13:50 +01:00
Nick Craig-Wood
115f1c2cc9 operations: speed up hash checking by aborting the other hash if first returns nothing
This speeds up hash checks when a Hash() function returns "" - this
means that the hash can be canceled for the other side.

In the common case of local hash vs remote hash empty this saves a lot
of time.

See: https://forum.rclone.org/t/rclone-s3-backend-copy-is-2x-slower-than-aws-s3-cp/27321/9
2022-06-16 22:13:50 +01:00
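An illustrative sketch of the early-abort idea using context cancellation (not rclone's actual operations code):

    package main

    import (
        "context"
        "fmt"
    )

    // equalHashes hashes both sides concurrently and cancels the other side
    // as soon as one of them reports it has no hash, since the comparison
    // can then never succeed.
    func equalHashes(ctx context.Context, srcHash, dstHash func(context.Context) string) bool {
        ctx, cancel := context.WithCancel(ctx)
        defer cancel()

        sums := make(chan string, 2)
        for _, hash := range []func(context.Context) string{srcHash, dstHash} {
            hash := hash
            go func() {
                sum := hash(ctx)
                if sum == "" {
                    cancel() // no hash here, so stop computing the other one
                }
                sums <- sum
            }()
        }
        a, b := <-sums, <-sums
        return a != "" && a == b
    }

    func main() {
        src := func(context.Context) string { return "" } // e.g. remote without MD5
        dst := func(ctx context.Context) string {
            <-ctx.Done() // the expensive local hash is cancelled early
            return ""
        }
        fmt.Println(equalHashes(context.Background(), src, dst)) // false
    }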
Nick Craig-Wood
5e4caa69ce local: make Hash function cancelable via context 2022-06-16 22:13:50 +01:00
Nick Craig-Wood
e7483b40b3 fshttp: add --disable-http-keep-alives to disable HTTP Keep Alives
See: https://forum.rclone.org/t/getting-rate-limited-before-advertised-limit-on-s3-compatible-object-storage/31010/
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
fa48b880c2 s3: retry RequestTimeout errors
See: https://forum.rclone.org/t/s3-failed-upload-large-files-bad-request-400/27695
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
4ac875a811 sync: fix --max-duration and --cutoff-mode soft
Before this change using --max-duration and --cutoff-mode soft would
work like --cutoff-mode hard.

This bug was introduced in this commit, which made transfers
cancelable - before that transfers couldn't be canceled.

122a47fba6 accounting: Allow transfers to be canceled with context #3257

This change adds the timeout to the input context for reading files
rather than the transfer context so the files transfers themselves
aren't canceled if --cutoff-mode soft is in action.

This also adds a test for cutoff mode soft and max duration which was
missing.

See: https://forum.rclone.org/t/max-duration-and-retries-not-working-correctly/27738
2022-06-16 22:13:50 +01:00
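A minimal sketch of that approach: the deadline goes on the context used to start new reads, while the transfer context is left untouched, so transfers already in flight run to completion:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    func main() {
        transferCtx := context.Background()
        readCtx, cancel := context.WithTimeout(transferCtx, 50*time.Millisecond)
        defer cancel()

        time.Sleep(100 * time.Millisecond)
        fmt.Println(readCtx.Err())     // context deadline exceeded: no new reads start
        fmt.Println(transferCtx.Err()) // <nil>: running transfers are not cancelled
    }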
Nick Craig-Wood
3f61869179 cmount: add tracing for *xattr FUSE callbacks
See: https://github.com/winfsp/cgofuse/issues/66
See: https://forum.rclone.org/t/cannot-copy-files-to-mounted-azure-storage-windows/30092
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
60d87185e1 sftp: add --sftp-set-env option to set environment variables
Fixes #6094
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
78120d40d9 sftp: add --sftp-concurrency to improve high latency transfers
See: https://forum.rclone.org/t/increasing-sftp-transfer-speed/29928
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
95e0934755 sftp: add --sftp-chunk-size to control packets sizes for high latency links
See: https://forum.rclone.org/t/increasing-sftp-transfer-speed/29928
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
1651429041 union: add min_free_space option for lfs/eplfs policies - fixes #6071 2022-06-16 22:13:50 +01:00
Nick Craig-Wood
29e37749b3 union: fix eplus policy to select correct entry for existing files #6071 2022-06-16 22:13:50 +01:00
Nick Craig-Wood
1e1af46a12 union: fix get free space for remotes which don't support it #6071
Before this fix GetFreeSpace returned math.MaxInt64 for remotes which
don't support reading free space, however this is used in various
comparison routines as a too large value, meaning that remotes of size
math.MaxInt64 were never being selected.

This fixes GetFreeSpace to return math.MaxInt64 - 1 so they can then be selected.

It also fixes GetUsedSpace the same way however as the default for not
supported was 0 this was very unlikely to have ever caused a problem.
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
1d2fe0d856 union: enable passing of options to upstreams and policies #6071
This factors out the options into a sub package so they can be passed
to upstreams and used in policies.
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
4f94b27800 check: implement --no-traverse and --no-unicode-normalization
See: https://forum.rclone.org/t/rclone-check-head-or-list-object-from-source/30400
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
4d72abf389 dropbox: fix nil pointer exception on dropbox impersonate user not found
Fixes #6139
2022-06-16 22:13:50 +01:00
Nick Craig-Wood
411013dbdc drive: add --drive-resource-key for accessing link-shared files 2022-06-16 22:13:50 +01:00
Nick Craig-Wood
e87e331f4c drive: make --drive-shared-with-me work with shared drives
Fixes #6247
2022-06-16 22:13:50 +01:00
Maciej Radzikowski
2e91287b2e docs/s3: add note about chunk size decreasing progress accuracy 2022-06-16 22:29:36 +02:00
Nick Craig-Wood
a0cb3bbd02 mount: allow tests to run on CI 2022-06-16 16:48:09 +01:00
Nick Craig-Wood
4a382c09ec mount: run tests in a subprocess to fix deadlock - fixes #3259
Before this change we ran the tests and the mount in the same process.
This could cause deadlocks and often did, and made the mount tests
very unreliable.

This fixes the problem by running the mount in a separate process and
commanding it via a pipe over stdin/stdout.
2022-06-16 16:48:09 +01:00
Nick Craig-Wood
626a416ff8 vfs: factor out the VFS option initialization for re-use #3259 2022-06-16 16:48:09 +01:00
Nick Craig-Wood
6c832a72ee Add CrossR to contributors 2022-06-16 16:48:09 +01:00
Nick Craig-Wood
c390098262 Add Sven Gerber to contributors 2022-06-16 16:48:09 +01:00
Nick Craig-Wood
41f3ceb67d Add Art M. Gallagher to contributors 2022-06-16 16:48:09 +01:00
Nick Craig-Wood
592358148d Add m00594701 to contributors 2022-06-16 16:48:09 +01:00
buengese
93a25498cf docs/pcloud: document the cleanup issues 2022-06-16 15:45:42 +02:00
buengese
32f913ffbd pcloud: fix cleanup - fixes #3853 2022-06-16 15:45:42 +02:00
buengese
621c4ebe15 bin/make_backend_docs: allow generation of docs for just one backend 2022-06-16 15:45:32 +02:00
CrossR
0279bf3abb ncdu: implement multi selection
Co-authored-by: buengese <buengese@protonmail.com>
2022-06-14 13:57:35 +02:00
Sven Gerber
50c2e37aac onedrive: add access scopes option
By default, rclone always requests read and write permissions, no matter what settings you configure in the AAD application. This option allows you to explicitly request read-only permissions.

Migrated the read-only option to the access scope option and set the disable_site_permission option to hidden.
2022-06-14 10:21:23 +01:00
Art M. Gallagher
6602e1a851 mega: document using MEGAcmd to help with login failures
Added an extra subsection under "Failure to log-in".

See: https://forum.rclone.org/t/30935
2022-06-14 09:55:40 +01:00
m00594701
02b4638a22 backend: add Huawei OBS to s3 provider list 2022-06-14 09:21:01 +01:00
albertony
ec117593f1 Fix lint issues reported by staticcheck
Used staticcheck 2022.1.2 (v0.3.2)

See: staticcheck.io
2022-06-13 21:13:50 +02:00
buengese
74bd7f3381 pcloud: fix about with no free space left 2022-06-13 20:40:15 +02:00
albertony
afa30abd33 mount: remove legacy OS X remnants 2022-06-13 18:06:38 +01:00
albertony
70d1d8d760 mount: replace deprecated fuse.ENOSYS with syscall.ENOSYS 2022-06-13 17:59:44 +01:00
albertony
5006ede266 mailru: use variable type int instead of time.Duration for keeping number of seconds
Fixes problem reported by staticcheck lint tool (v0.3.2):
Poorly chosen name for variable of type time.Duration (ST1011).
2022-06-13 17:42:24 +01:00
Roberto Ricci
0f41e91d41 cmd/ncdu: display correct path in delete confirmation dialog
If the remote on the command line is "remote:subdir", when
deleting "filename", the confirmation message shows the path
"remote:subdirfilename".
Using fspath.JoinRootPath() fixes this. Also use this function
and fs.ConfigString() in other parts of the file, since they
are more appropriate.
2022-06-13 17:40:59 +01:00
buengese
3a20929db4 uptobox: fix root path handling - fixes #5903 2022-06-12 22:36:46 +02:00
buengese
cee79f27ee zoho: add Japan and China regions 2022-06-12 15:37:30 +02:00
buengese
aeb5dc2892 docs/zoho: add a section explaining client_id setup 2022-06-12 15:32:31 +02:00
Nick Craig-Wood
cfe0911e0d sync: fix tests for overlapping with filter
In commit

3ccf222acb sync: overlap check is now filter-sensitive

The tests were attempting to write invalid objects on some backends
due to a leading / on the object name.

This fix also adds a few more test cases and makes sure the tests can
be run individually.
2022-06-09 14:26:43 +01:00
Nick Craig-Wood
7c1f2d7c84 dirtree: fix tests with -fast-list
In commit

da404dc0f2 sync,copy: Fix --fast-list --create-empty-src-dirs and --exclude

The fix caused DirTree.AddDir to be called with the root directory.
This in turn caused a spurious directory entry in the DirTree which
caused tests with the -fast-list flag to fail with directory not found
errors.
2022-06-09 14:26:43 +01:00
albertony
5db9a2f831 sftp: use vendor-specific VFS statistics extension for about if available
See #5763
2022-06-08 21:14:53 +02:00
albertony
b4091f282a sftp: add support for about and hashsum on windows server
Windows shells like cmd and powershell needs to use different quoting/escaping
of strings and paths than the unix shell, and also absolute paths must be fixed
by removing leading slash that the POSIX formatted paths have
(e.g. /C:/Users does not work in shell, it must be converted to C:/Users).

Tries to autodetect shell type (cmd, powershell, unix) on first use.

Implemented default builtin powershell functions for hashsum and about when remote
shell is powershell.

See #5763

Fixes #5758
2022-06-08 21:14:53 +02:00
albertony
218bf2183d docs: add missing backends from listing of optional features (Akamai, Koofr and Sia) 2022-06-08 20:47:58 +02:00
Nick Craig-Wood
bb6edb3c39 build: update dependencies
Also:

- azureblob: fix compile after API change in upstream library
2022-06-08 18:29:42 +01:00
Nick Craig-Wood
6c2331ffd7 mount: skip tests on CI even if >= 2 processors 2022-06-08 18:29:42 +01:00
Nick Craig-Wood
08a897424b Add Noah Hsu to contributors 2022-06-08 18:29:42 +01:00
albertony
acd7ad9190 config: adjust section headers to improve readability
See #6211
2022-06-08 19:24:38 +02:00
albertony
b246584a02 config: more readable listing of remote options
Differentiate output of 'config show remote' command from listing options as part
of interactive config process for consistency: 'config show remote' consistent with
'config show', while listing in interactive config consistent with other output.

See #6211
2022-06-08 19:24:38 +02:00
albertony
61a75bfe07 config: add empty line between sections to improve readability
See #6211
2022-06-08 19:24:38 +02:00
Noah Hsu
ef089dd867 webdav: add SharePoint in other specific regions support 2022-06-08 17:24:35 +01:00
albertony
e3d44612c1 jottacloud: error strings should not be capitalized 2022-06-08 17:56:37 +02:00
albertony
b2388f1294 jottacloud: refactor endpoint paths 2022-06-08 17:56:37 +02:00
albertony
a571c1fb46 jottacloud: refactor naming of different api urls 2022-06-08 17:56:37 +02:00
albertony
01340acad2 jottacloud: add support for upload to custom device and mountpoint
See #5926
2022-06-08 17:56:37 +02:00
albertony
700ca23a71 config: add utility function for backend config with list and custom input 2022-06-08 17:56:37 +02:00
albertony
f4f0e444bf filter: allow multiple --exclude-if-present flags - fixes #6219 2022-06-08 17:11:52 +02:00
albertony
20aaeba547 docs: clarify backend support for setting modtime only (#5638) 2022-06-08 16:29:35 +02:00
Nick Craig-Wood
4b358ff43b combine: backend to combine multiple remotes in one directory tree
Fixes #5600
2022-06-08 14:57:25 +01:00
Nick Craig-Wood
e58d75e4d7 drive: make backend config -o config add a combined AllDrives remote
This adjusts

    rclone backend drives -o config drive:

So that it also emits a config section called `AllDrives` which uses
the combine backend to make a backend which combines all the shared
drives into one.

It also makes sure that all the shared drive names are valid rclone
config names, deduplicating if necessary.

Fixes #4506
2022-06-08 14:57:25 +01:00
Nick Craig-Wood
fb58737142 fstests: check for wrapped errors in ListR test 2022-06-08 14:57:25 +01:00
Nick Craig-Wood
26db80c270 ftp: revert to upstream github.com/jlaffaye/ftp from our fork
...now all of our patches have been merged #5810
2022-06-08 11:58:32 +01:00
Nick Craig-Wood
9eb3470c9c Add Matthew Vernon to contributors 2022-06-08 11:58:32 +01:00
Nick Craig-Wood
a449dd7d1c Add Jason Zheng to contributors 2022-06-08 11:58:32 +01:00
Nick Craig-Wood
fc4fe33703 Add Nick to contributors 2022-06-08 11:58:32 +01:00
Matthew Vernon
e11bfacfcf docs: note use of regexp filtering prevents directory optimisation
Discussed in the forum:
https://forum.rclone.org/t/rclone-regex-in-filter-causes-spurious-directory-catch-all-filter/30985

Signed-off-by: Matthew Vernon <mvernon@wikimedia.org>
2022-06-07 17:59:34 +01:00
Jason Zheng
a9c49c50a0 ftp: add support for disable_utf8 option - fixes #6209 2022-06-01 19:09:37 +01:00
albertony
8979337313 Add Eric Wolf to contributors 2022-06-01 20:02:25 +02:00
Eric Wolf
7ffab5d998 docs: clarify use of verbosity environment variable (#6208)
RCLONE_VERBOSE does not use a true/false setting, instead using a 0,1,2 setting.
2022-06-01 20:00:43 +02:00
Nick
3ccf222acb sync: overlap check is now filter-sensitive
Previously, the overlap check was based on simple prefix checks of the source and destination paths. Now it actually checks whether the destination is excluded via any filter rule or an "--exclude-if-present" file.
2022-06-01 18:24:54 +01:00
Nick Craig-Wood
2781f8e2f1 gcs: Fix download of "Content-Encoding: gzip" compressed objects
Before this change, if an object compressed with "Content-Encoding:
gzip" was downloaded, a length and hash mismatch would occur since the
as the go runtime automatically decompressed the object on download.

This change erases the length and hash on compressed objects so they
can be downloaded successfully, at the cost of not being able to check
the length or the hash of the downloaded object.

This also adds the --gcs-download-compressed flag to allow the
compressed files to be downloaded as-is providing compressed objects
with intact size and hash information.

Fixes #2658
2022-05-31 12:10:21 +01:00
Nick Craig-Wood
3d55f69338 dropbox: add logs to show when poll interval limits are exceeded 2022-05-31 12:09:50 +01:00
Nick Craig-Wood
cc9bc2cb80 Add Andrey to contributors 2022-05-31 12:09:50 +01:00
Nick Craig-Wood
80ac59ee5b Add Rob Pickerill to contributors 2022-05-31 12:09:50 +01:00
m8rge
5d6a6dd6c0 dropbox: migrate from deprecated api
Change UploadSessionFinishBatch usage to UploadSessionFinishBatchV2. Change in sdk was made in https://github.com/dropbox/dropbox-sdk-go-unofficial/pull/106
2022-05-30 17:24:18 +01:00
Andrey
c676e2139d bisync: docs: add Yandex Disk as tested bisync backend 2022-05-30 17:10:25 +01:00
albertony
7361c98b2d Add Mr-Kanister to contributors 2022-05-27 10:34:58 +02:00
Mr-Kanister
5cc47de912 docs/bisync: fixed typo (#6196)
Fixed a little typo in /docs/content/bisync.md
2022-05-27 10:32:25 +02:00
Rob Pickerill
6d342a3c5b azureblob: case insensitive access tier 2022-05-24 09:19:08 +01:00
Nick Craig-Wood
336051870e build: add linux/arm/v6 to docker images - fixes #6192 2022-05-23 18:04:55 +01:00
Nick Craig-Wood
38c6d022bd Add Hugo Laloge to contributors 2022-05-23 18:04:55 +01:00
Hugo Laloge
c138367df6 onedrive: Implement --poll-interval for onedrive
Implement ChangeNotifier for onedrive.
Use drive delta queries to listen for modifications.
2022-05-23 11:30:43 +01:00
Nick Craig-Wood
da404dc0f2 sync,copy: Fix --fast-list --create-empty-src-dirs and --exclude
Before this change, if --fast-list was in use while doing a sync or
copy with --create-empty-src-dirs and --exclude excluded all the files
from the directory (but not the directory), then the directory would
not be created.

This is also visible with `rclone tree` which uses the same tree
building approach as `rclone sync --fast-list` where the directories
would go missing from the tree view.

This was caused by not adding the parents of excluded files to the
directory tree.

See: https://forum.rclone.org/t/create-empty-src-dirs-issue-with-b2/30856
2022-05-23 10:10:55 +01:00
Nick Craig-Wood
28e43fe7af Add SimonLiu to contributors 2022-05-23 10:10:55 +01:00
SimonLiu
3ec25f437b Update remote_setup.md
Add another option (utilizing an SSH tunnel) for Linux/macOS users to complete the auth on a headless box.
2022-05-19 14:13:38 +01:00
Alex JOST
a34276e9b3 s3: Add Warsaw location for Scaleway
Add new location in Warsaw (Poland) to endpoints for Scaleway.

More Information:
https://blog.scaleway.com/scaleway-is-now-in-warsaw/
https://www.scaleway.com/en/docs/storage/object/how-to/create-a-bucket/
2022-05-19 14:06:16 +01:00
Nick Craig-Wood
c2baacc0a4 union: fix uploading files to union of all bucket based remotes
Before this fix, if uploading to a union consisting of all bucket
based remotes (eg s3), uploads failed with:

    Failed to copy: object not found

This was because the union backend was relying on parent directories
being created to work out which files to upload. If all the upstreams
were bucket based backends which can't hold empty directories, no
directories were created and the upload failed.

This fixes the problem by returning the upstreams used when creating
the directory for the upload, rather than searching for them again
after they've been created.

This will also make the union backend a little more efficient.

Fixes #6170
2022-05-19 13:23:41 +01:00
Nick Craig-Wood
fcec4bedbe drive: fix 404 errors on copy/server side copy objects from public folder
Before this change, copying objects from a public folder shared with a
resource key failed with a 404 error.

This is because rclone wasn't supplying the resource Key where it
should have been.

After this change rclone adds the resource Key when trying to download
or server side copy an object.

There may be more places rclone needs to supply the resource key as
this is barely documented in the API documentation.

See: https://forum.rclone.org/t/copying-files-from-a-publicly-accessible-google-drive-folder-added-as-a-shortcut-getting-error-404-server-side-copies-are-disabled/30811
2022-05-18 18:12:19 +01:00
Nick Craig-Wood
813a5e0931 s3: Remove bucket ACL configuration for Cloudflare R2
Bucket ACLs are not supported by Cloudflare R2. All buckets are
private and must be shared using a Cloudflare Worker.
2022-05-17 15:57:09 +01:00
Nick Craig-Wood
bd4abb15a3 Add Derek Battams to contributors 2022-05-17 15:57:09 +01:00
Nick Craig-Wood
7f84283539 Add Erik van Velzen to contributors 2022-05-17 15:57:09 +01:00
albertony
47b1a0d6fa docs: improve guide for installing from source - fixes #5809 2022-05-17 12:11:20 +01:00
albertony
ce168ecac2 Configurable version suffix independent of version number 2022-05-17 12:10:01 +01:00
Eng Zer Jun
4f0ddb60e7 refactor: replace strings.Replace with strings.ReplaceAll
strings.ReplaceAll(s, old, new) is a wrapper function for
strings.Replace(s, old, new, -1). But strings.ReplaceAll is more
readable and removes the hardcoded -1.

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2022-05-17 11:08:37 +01:00
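A quick example of the equivalence:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // The two calls below are equivalent; ReplaceAll just hides the -1.
        fmt.Println(strings.Replace("a-b-c", "-", "_", -1)) // a_b_c
        fmt.Println(strings.ReplaceAll("a-b-c", "-", "_"))  // a_b_c
    }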
albertony
b929a56f46 Add Christian Galo to contributors 2022-05-16 13:01:55 +02:00
Christian Galo
74af6409d4 docs/rc: added missing global flags (#6083) 2022-05-16 13:00:19 +02:00
albertony
0e77072dcc vfs: error strings should not be capitalized 2022-05-16 12:43:43 +02:00
albertony
2437eb3cce vfs: fix incorrect detection of root in parent directory utility function
When using filepath.Dir, a difference to path.Dir is that it returns os PathSeparator
instead of slash when the path consists entirely of separators.

Also fixed casing of the function name, use OS in all caps instead of Os
as recommended here: https://github.com/golang/go/wiki/CodeReviewComments#initialisms
2022-05-16 12:43:43 +02:00
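A quick illustration of the difference (the Windows result is shown in the comment):

    package main

    import (
        "fmt"
        "path"
        "path/filepath"
    )

    func main() {
        // path.Dir always works on slash separated paths.
        fmt.Println(path.Dir("///"))     // "/"
        // filepath.Dir returns the OS path separator when the path consists
        // entirely of separators.
        fmt.Println(filepath.Dir("///")) // "/" on Unix, "\" on Windows
    }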
Michael C Tiernan - MIT-Research Computing Project
a12c94caff docs: replaced extended ascii smart quotes (#6171) 2022-05-16 12:34:54 +02:00
Kaspian
542c1616b8 docs: small style fixes 2022-05-13 23:06:45 +01:00
albertony
8697f0bd26 about: improved error message 2022-05-13 12:08:10 +01:00
albertony
a9f18f8093 Set proper exit code for errors that are not low-level retried (e.g. size/timestamp changing)
Fixes #5785
2022-05-13 12:02:55 +01:00
Derek Battams
8e5e230b81 b2: use chunksize lib to determine chunksize dynamically
Fixes #4643
2022-05-13 09:25:48 +01:00
Derek Battams
c0985e93b7 azureblob: use chunksize lib to determine chunksize dynamically 2022-05-13 09:25:48 +01:00
Derek Battams
fb4f7555c7 s3: use chunksize lib to determine chunksize dynamically 2022-05-13 09:25:48 +01:00
Derek Battams
f2e7a2e794 chunksize: initial implementation of chunksize helper lib 2022-05-13 09:25:48 +01:00
Erik van Velzen
9e4854955c storj: fix put
The "relative" argument was missing when Put'ing a file. This
sets an incorrect object entry in the cache, leading to the file being
unreadable when using mount functionality.

Fixes #6151
2022-05-12 20:43:54 +01:00
Vincent Murphy
319ac225e4 s3: backend restore command to skip non-GLACIER objects 2022-05-12 20:42:37 +01:00
albertony
a9d3283d97 jottacloud: fix listing output of remote with special characters
This fixes the failing integration test: TestIntegration/FsMkdir/FsPutFiles/FsIsFile
2022-05-12 20:41:07 +01:00
Nick Craig-Wood
edf0412464 test_all: correct path for Internet Archive test
This is to make it use the rclone project's account and to remove the
/ in the remote name which was crashing the integration tester!
2022-05-12 20:38:05 +01:00
Nick Craig-Wood
e6194a4b83 Add Hugal31 to contributors 2022-05-12 20:38:05 +01:00
Nick Craig-Wood
7f05990623 Add Werner to contributors 2022-05-12 20:38:05 +01:00
Nick Craig-Wood
e16f2a566f Add Kaspian to contributors 2022-05-12 20:38:05 +01:00
Hugal31
a36fef8a66 rclone.mount: ignore _netdev mount argument - FIxes #5808
Do not trigger an error when parsing arguments starting with underscores.

_netdev was already ignored after parsing.
2022-05-12 20:27:13 +01:00
Werner
6500e1d205 docs: bisync: minor grammar fix 2022-05-12 20:24:52 +01:00
Kaspian
9f7484e4e9 docs: faq: small grammar fix 2022-05-12 20:23:48 +01:00
Michael C Tiernan - MIT-Research Computing Project
0ba702ccf4 install: Pre verify sudo authorization "-v" before calling curl.
The way this was, the curl download interfered with the bash invocation. Here it will authenticate first.
2022-05-12 20:19:33 +01:00
Nick Craig-Wood
6f91198b57 s3: Support Cloudflare R2 - fixes #5642 2022-05-12 08:49:20 +01:00
Nick Craig-Wood
cf0a72aecd fs: fix FixRangeOption to do nothing on unknown sized objects
FixRangeOption shouldn't be called on an object of unknown size, but
if it is, make sure it does nothing.

See: #5642
2022-05-12 08:49:20 +01:00
Nick Craig-Wood
f6fd6ee777 fs: fix FixRangeOption make SeekOptions into absolute RangeOptions
Cloudflare R2 doesn't support range options like `Range: bytes=21-`.

This patch makes FixRangeOption turn a SeekOption into an absolute
RangeOption like this `Range: bytes=21-25` to interoperate with R2.

See: #5642
2022-05-12 08:49:20 +01:00
Nick Craig-Wood
1e66d052fd fs: fix FixRangeOption to make fetch to end Range options absolute
Before this change FixRangeOption was leaving `Range: bytes=21-`
alone, thus not fulfilling its contract of making Range requests
absolute.

As it happens this form isn't supported by Cloudflare R2.

After this change the request is normalised to `Range: bytes=21-25`.

See: #5642
2022-05-12 08:49:20 +01:00
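An illustrative sketch of the normalisation, not rclone's actual FixRangeOption:

    package main

    import "fmt"

    // absoluteRange turns an open ended range such as "bytes=21-" into an
    // absolute one like "bytes=21-25" given the object size, so backends
    // that require absolute ranges (e.g. Cloudflare R2) accept it.
    func absoluteRange(start, end, size int64) string {
        if end < 0 || end >= size {
            end = size - 1 // fetch-to-end becomes an explicit last byte
        }
        return fmt.Sprintf("bytes=%d-%d", start, end)
    }

    func main() {
        fmt.Println(absoluteRange(21, -1, 26)) // bytes=21-25
    }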
Nick Craig-Wood
e5974ac4b0 s3: use PutObject from the aws SDK to upload single part objects
Before this change rclone used presigned requests to upload single
part objects. This was because of a limitation in the SDK which didn't
allow non seekable io.Readers to be passed in.

This is incompatible with some S3 backends, and rclone wasn't adding
the `X-Amz-Content-Sha256: UNSIGNED-PAYLOAD` header which was
incompatible with other S3 backends.

The SDK now allows for this so rclone can use PutObject directly.

This sets the `X-Amz-Content-Sha256: UNSIGNED-PAYLOAD` flag on the PUT
request. However rclone will add a `Content-Md5` header if at all
possible so the body data is still protected.

Note that the old behaviour can still be configured if required with
the `use_presigned_request` config parameter.

Fixes #5422
2022-05-12 08:49:20 +01:00
Nick Craig-Wood
50a0c3482d lib/readers: add FakeSeeker to adapt io.Reader to io.ReadSeeker #5422 2022-05-12 08:49:20 +01:00
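A hedged sketch of the general idea behind such a fake seeker; the type and behaviour here are illustrative, not necessarily lib/readers' real API:

    package main

    import (
        "errors"
        "fmt"
        "io"
        "strings"
    )

    // fakeSeeker wraps an io.Reader so it satisfies io.ReadSeeker for SDKs
    // that only ever seek back to the start before reading.
    type fakeSeeker struct {
        r    io.Reader
        read bool
    }

    func (f *fakeSeeker) Read(p []byte) (int, error) {
        f.read = true
        return f.r.Read(p)
    }

    func (f *fakeSeeker) Seek(offset int64, whence int) (int64, error) {
        if f.read || offset != 0 || whence != io.SeekStart {
            return 0, errors.New("fakeSeeker: seeking not really supported")
        }
        return 0, nil
    }

    func main() {
        var rs io.ReadSeeker = &fakeSeeker{r: strings.NewReader("hello")}
        _, _ = rs.Seek(0, io.SeekStart) // allowed before any read
        b, _ := io.ReadAll(rs)
        fmt.Println(string(b)) // hello
    }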
Nick Craig-Wood
389a29b017 Add Michael C Tiernan - MIT-Research Computing Project to contributors 2022-05-12 08:49:20 +01:00
Nick Craig-Wood
9dcf9375e8 Add Mateusz Puczyński to contributors 2022-05-12 08:49:20 +01:00
Lesmiscore
1d6d41fb91 backend/internetarchive: fix uploads can take very long time
* fill in empty values for non-wait mode
* add tracking metadata to observe file change
* completely remove getHashes
* remove unreliable update tests

Closes #6150
2022-05-07 21:54:14 +01:00
Michael C Tiernan - MIT-Research Computing Project
a3d4307892 install: set the modes on the files and/or directories on macOS
Changes made specifically for macOS.
Paths are established/defined in a single place and modes are set
automatically when created. (Platform specific.)
2022-05-05 17:50:22 +01:00
ehsantdy
a446106041 s3: update Arvancloud default values and correct docs 2022-05-02 16:04:01 +01:00
Mateusz Puczyński
607172b6ec go: run go mod tidy and set version to minimum supported go - go1.16
This fixes `go mod tidy` needing manual intervention to build with
previous go versions.
2022-05-01 13:28:20 +01:00
Nick Craig-Wood
94757277bc build: fix go mod tidy removing golang.org/x/mobile dependency
Before this running `go mod tidy` caused the build to break because it
removed the dependency on golang.org/x/mobile and a command line tool
from this package is needed for the build.

This adds an explicit dependency which will mean the tool is always present.
2022-05-01 13:28:20 +01:00
Nick Craig-Wood
deab86867c build: support mount on windows/arm64 - all windows binaries now not cgo
This builds all windows binaries without CGO but with cmount.

cgofuse has a compile mode which works without CGO on Windows for
amd64/x86/arm64 architectures so switch to using that.
2022-04-29 18:04:21 +01:00
Nick Craig-Wood
c0c5b3bc6b build: add rclone version step 2022-04-29 18:04:21 +01:00
Nick Craig-Wood
a947f298e6 cmount: remove cgo from windows build requirements since it builds without 2022-04-29 18:04:21 +01:00
Nick Craig-Wood
1b0128ecb2 cmount: update winfsp/cgofuse dependency for windows/arm64 build 2022-04-29 18:04:21 +01:00
Nick Craig-Wood
c5395db1f1 Changelog updates from Version v1.58.1 2022-04-29 13:11:23 +01:00
Nick Craig-Wood
6e5382fc99 Add SwazRGB to contributors 2022-04-29 13:11:23 +01:00
Nick Craig-Wood
134592adaa Add ehsantdy to contributors 2022-04-29 13:11:23 +01:00
Nick Craig-Wood
36e614f550 Add Lesmiscore to contributors 2022-04-29 13:11:23 +01:00
Nick Craig-Wood
7bfed98b48 Add Zsolt Ero to contributors 2022-04-29 13:11:23 +01:00
Nick Craig-Wood
f471096fd0 Add Leroy van Logchem to contributors 2022-04-29 13:11:23 +01:00
SwazRGB
4cebade95d b2: Add b2-version-at flag to show file versions at time
Uses b2_list_file_versions to retrieve all file versions, and returns
the one that was active at the specified time

This is especially useful in combination with other backup tools, such
as restic, which may use rclone as a backend.
2022-04-28 16:29:13 +01:00
SwazRGB
a8cd18faf3 fs: Implement fs.Time
Similar to fs.Duration but parses into a timestamp instead

Supports parsing from:

* Any of the date formats in parseTimeDates
* A time.Duration offset from now
* parseDurationSuffixes offset from now
2022-04-28 16:29:13 +01:00
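An illustrative sketch only, not rclone's fs.Time implementation; the layouts listed are examples rather than the full parseTimeDates set:

    package main

    import (
        "fmt"
        "time"
    )

    // parseTime accepts either an absolute timestamp or a duration offset
    // back from now, as the commit message describes.
    func parseTime(s string) (time.Time, error) {
        for _, layout := range []string{time.RFC3339, "2006-01-02"} {
            if t, err := time.Parse(layout, s); err == nil {
                return t, nil
            }
        }
        if d, err := time.ParseDuration(s); err == nil {
            return time.Now().Add(-d), nil // "1h" means one hour ago
        }
        return time.Time{}, fmt.Errorf("couldn't parse %q as a time", s)
    }

    func main() {
        t, _ := parseTime("100h")
        fmt.Println(t.Before(time.Now())) // true
    }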
730 changed files with 76312 additions and 19577 deletions

.github/dependabot.yml vendored Normal file

@@ -0,0 +1,10 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "daily"


@@ -15,22 +15,24 @@ on:
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
required: true
default: true
jobs:
build:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.16', 'go1.17']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
include:
- job_name: linux
os: ubuntu-latest
go: '1.18.x'
go: '1.20'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -39,9 +41,16 @@ jobs:
librclonetest: true
deploy: true
- job_name: linux_386
os: ubuntu-latest
go: '1.20'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-11
go: '1.18.x'
go: '1.20'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -50,48 +59,37 @@ jobs:
- job_name: mac_arm64
os: macos-11
go: '1.18.x'
go: '1.20'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows_amd64
- job_name: windows
os: windows-latest
go: '1.18.x'
go: '1.20'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.18.x'
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
cgo: '0'
build_flags: '-include "^windows/"'
build_args: '-buildmode exe'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.18.x'
build_flags: '-exclude "^(windows/(386|amd64)|darwin/|linux/)"'
go: '1.20'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.16
- job_name: go1.18
os: ubuntu-latest
go: '1.16.x'
go: '1.18'
quicktest: true
racequicktest: true
- job_name: go1.17
- job_name: go1.19
os: ubuntu-latest
go: '1.17.x'
go: '1.19'
quicktest: true
racequicktest: true
@@ -101,14 +99,13 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
stable: 'false'
go-version: ${{ matrix.go }}
check-latest: true
@@ -127,7 +124,7 @@ jobs:
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse libfuse-dev rpm pkg-config
sudo apt-get install fuse3 libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
@@ -166,7 +163,7 @@ jobs:
env
- name: Go module cache
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -178,6 +175,11 @@ jobs:
run: |
make
- name: Rclone version
shell: bash
run: |
rclone version
- name: Run tests
shell: bash
run: |
@@ -218,45 +220,54 @@ jobs:
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Code quality test
uses: golangci/golangci-lint-action@v2
uses: golangci/golangci-lint-action@v3
with:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
uses: actions/setup-go@v3
with:
go-version: '1.20'
check-latest: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: Scan for vulnerabilities
run: govulncheck ./...
android:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v1
uses: actions/setup-go@v3
with:
go-version: 1.18.x
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true
go-version: '1.20'
- name: Go module cache
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -277,27 +288,29 @@ jobs:
go install golang.org/x/mobile/cmd/gobind@latest
go install golang.org/x/mobile/cmd/gomobile@latest
env PATH=$PATH:~/go/bin gomobile init
echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV
- name: arm-v7a gomobile build
run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
@@ -305,12 +318,12 @@ jobs:
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
@@ -318,12 +331,12 @@ jobs:
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
@@ -331,7 +344,7 @@ jobs:
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
- name: Upload artifacts
run: |


@@ -12,7 +12,7 @@ jobs:
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and publish image
@@ -20,7 +20,7 @@ jobs:
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}


@@ -11,7 +11,7 @@ jobs:
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Get actual patch version
@@ -28,7 +28,7 @@ jobs:
with:
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
@@ -40,7 +40,7 @@ jobs:
name: Build docker plugin job
steps:
- name: Checkout master
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and publish docker plugin
@@ -50,7 +50,7 @@ jobs:
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}

.github/workflows/winget.yml vendored Normal file

@@ -0,0 +1,14 @@
name: Publish to Winget
on:
release:
types: [released]
jobs:
publish:
runs-on: windows-latest # Action can only run on Windows
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:
identifier: Rclone.Rclone
installers-regex: '-windows-\w+\.zip$'
token: ${{ secrets.WINGET_TOKEN }}


@@ -20,7 +20,7 @@ issues:
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-per-linter: 0
max-issues-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0


@@ -77,7 +77,7 @@ Make sure you
* Add [documentation](#writing-documentation) for a new feature.
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
When you are done with that push your changes to Github:
When you are done with that push your changes to GitHub:
git push -u origin my-new-feature
@@ -88,7 +88,7 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
## Using Git and Github ##
## Using Git and GitHub ##
### Committing your changes ###

MANUAL.html (generated, 11040 changes): diff suppressed because it is too large
MANUAL.md (generated, 10956 changes): diff suppressed because it is too large
MANUAL.txt (generated, 11289 changes): diff suppressed because it is too large


@@ -81,6 +81,9 @@ quicktest:
racequicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
compiletest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
# Do source code quality checks
check: rclone
@echo "-- START CODE QUALITY REPORT -------------------------------"
@@ -97,7 +100,7 @@ release_dep_linux:
# Get the release dependencies we only install on Windows
release_dep_windows:
GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
# Update dependencies
showupdates:
@@ -245,18 +248,18 @@ retag:
startdev:
@echo "Version is $(VERSION)"
@echo "Next version is $(NEXT_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_VERSION)\"\n" | gofmt > fs/versiontag.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_PATCH_VERSION)\"\n" | gofmt > fs/versiontag.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
winzip:
zip -9 rclone-$(TAG).zip rclone.exe


@@ -29,6 +29,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
@@ -41,12 +42,15 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
@@ -59,17 +63,20 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
@@ -82,6 +89,19 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
### Virtual storage providers
These backends adapt or modify other storage providers
* Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
* Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
* Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
* Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
* Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
* Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
* Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
* Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
## Features
* MD5/SHA-1 hashes checked at all times for file integrity
@@ -96,7 +116,7 @@ Please see [the full list of all storage providers and their features](https://r
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDAV/FTP/SFTP/DLNA
## Installation & documentation
@@ -117,5 +137,5 @@ Please see the [rclone website](https://rclone.org/) for:
License
-------
This is free software under the terms of MIT the license (check the
This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).


@@ -53,6 +53,14 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
## Tidy beta
At some point after the release run
bin/tidy-beta v1.55
where the version number is that of a couple ago to remove old beta binaries.
## Making a point release
If rclone needs a point release due to some horrendous bug:
@@ -66,8 +74,7 @@ Set vars
First make the release branch. If this is a second point release then
this will be done already.
* git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
* make startstable
Now


@@ -1 +1 @@
v1.59.0
v1.62.0


@@ -1,3 +1,4 @@
// Package alias implements a virtual provider to rename existing remotes.
package alias
import (


@@ -1,3 +1,4 @@
// Package all imports all the backends
package all
import (
@@ -9,6 +10,7 @@ import (
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
@@ -20,8 +22,8 @@ import (
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hasher"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
@@ -32,6 +34,7 @@ import (
_ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/putio"
@@ -41,6 +44,7 @@ import (
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/smb"
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"


@@ -435,7 +435,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, title string, directorie
query += " AND kind:" + folderKind
} else if filesOnly {
query += " AND kind:" + fileKind
} else {
//} else {
// FIXME none of these work
//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
@@ -556,9 +556,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// This is a workaround for Amazon sometimes returning
//
// * 408 REQUEST_TIMEOUT
// * 504 GATEWAY_TIMEOUT
// * 500 Internal server error
// - 408 REQUEST_TIMEOUT
// - 504 GATEWAY_TIMEOUT
// - 500 Internal server error
//
// At the end of large uploads. The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
@@ -626,7 +626,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
// Put the object into the container
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -685,9 +685,9 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1002,7 +1002,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {



@@ -1,5 +1,5 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
package azureblob


@@ -1,12 +1,11 @@
// Test AzureBlob filesystem interface
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
package azureblob
import (
"context"
"testing"
"github.com/rclone/rclone/fs"
@@ -17,10 +16,12 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{},
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
})
}
@@ -32,32 +33,24 @@ var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
)
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
func TestServicePrincipalFileSuccess(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"password": "my secret",
"tenant": "my active directory tenant ID"
}
`
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
if assert.NoError(t, err) {
assert.NotNil(t, tokenRefresher)
func TestValidateAccessTier(t *testing.T) {
tests := map[string]struct {
accessTier string
want bool
}{
"hot": {"hot", true},
"HOT": {"HOT", true},
"Hot": {"Hot", true},
"cool": {"cool", true},
"archive": {"archive", true},
"empty": {"", false},
"unknown": {"unknown", false},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
got := validateAccessTier(test.accessTier)
assert.Equal(t, test.want, got)
})
}
}
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
func TestServicePrincipalFileFailure(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"tenant": "my active directory tenant ID"
}
`
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}


@@ -1,7 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js
// +build plan9 solaris js
//go:build plan9 || solaris || js || !go1.18
// +build plan9 solaris js !go1.18
package azureblob


@@ -1,137 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)
// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string
type msiIdentifierType int
const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)
type userMSI struct {
Type msiIdentifierType
Value string
}
type httpError struct {
Response *http.Response
}
func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)
// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()
// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")
// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}
// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, fmt.Errorf("Couldn't read IMDS response: %w", err)
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, fmt.Errorf("Couldn't unmarshal IMDS response: %w", err)
}
return result, nil
}


@@ -1,118 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}
func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)
// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}
for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}
// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed")
}
}
}
func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}
func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}


@@ -1,3 +1,4 @@
// Package api provides types used by the Backblaze B2 API.
package api
import (
@@ -238,7 +239,7 @@ type GetFileInfoRequest struct {
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
// 10 number number of milliseconds since midnight, January 1, 1970
// 10 number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the


@@ -1,4 +1,4 @@
// Package b2 provides an interface to the Backblaze B2 object storage system
// Package b2 provides an interface to the Backblaze B2 object storage system.
package b2
// FIXME should we remove sha1 checks from here as rclone now supports
@@ -64,7 +64,8 @@ const (
// Globals
var (
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
)
// Register with Fs
@@ -106,6 +107,11 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: false,
Advanced: true,
}, {
Name: "version_at",
Help: "Show file versions as they were at the specified time.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: fs.Time{},
Advanced: true,
}, {
Name: "hard_delete",
Help: "Permanently delete files on remote removal, otherwise hide files.",
@@ -211,6 +217,7 @@ type Options struct {
Endpoint string `config:"endpoint"`
TestMode string `config:"test_mode"`
Versions bool `config:"versions"`
VersionAt fs.Time `config:"version_at"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
@@ -273,7 +280,7 @@ func (f *Fs) Root() string {
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return fmt.Sprintf("B2 root")
return "B2 root"
}
if f.rootDirectory == "" {
return fmt.Sprintf("B2 bucket %s", f.rootBucket)
@@ -649,15 +656,15 @@ var errEndList = errors.New("end list")
//
// (bucket, directory) is the starting directory
//
// If prefix is set then it is removed from all file names
// If prefix is set then it is removed from all file names.
//
// If addBucket is set then it adds the bucket to the start of the
// remotes generated
// remotes generated.
//
// If recurse is set the function will recursively list
// If recurse is set the function will recursively list.
//
// If limit is > 0 then it limits to that many files (must be less
// than 1000)
// than 1000).
//
// If hidden is set then it will list the hidden (deleted) files too.
//
@@ -696,9 +703,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
Method: "POST",
Path: "/b2_list_file_names",
}
if hidden {
if hidden || f.opt.VersionAt.IsSet() {
opts.Path = "/b2_list_file_versions"
}
lastFileName := ""
for {
var response api.ListFileNamesResponse
err := f.pacer.Call(func() (bool, error) {
@@ -728,7 +738,21 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if addBucket {
remote = path.Join(bucket, remote)
}
if f.opt.VersionAt.IsSet() {
if time.Time(file.UploadTimestamp).After(time.Time(f.opt.VersionAt)) {
// Ignore versions that were created after the specified time
continue
}
if file.Name == lastFileName {
// Ignore versions before the already returned version
continue
}
}
// Send object
lastFileName = file.Name
err = fn(remote, file, isDirectory)
if err != nil {
if err == errEndList {
@@ -1001,7 +1025,7 @@ func (f *Fs) clearBucketID(bucket string) {
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1181,10 +1205,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
}
}
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
if time.Since(time.Time(timestamp)).Hours() > 24 {
return true
}
return false
return time.Since(time.Time(timestamp)).Hours() > 24
}
// Delete Config.Transfers in parallel
@@ -1200,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
fs.Errorf(object.Name, "Can't create object %v", err)
continue
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err)
tr.Done(ctx, err)
@@ -1214,7 +1235,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
if err != nil {
fs.Errorf(object, "Can't create object %+v", err)
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
if oldOnly && last != remote {
// Check current version of the file
if object.Action == "hide" {
@@ -1313,9 +1334,9 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1457,26 +1478,23 @@ func (o *Object) Size() int64 {
// Clean the SHA1
//
// Make sure it is lower case
// Make sure it is lower case.
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (e.g. Cyberduck) use this
func cleanSHA1(sha1 string) (out string) {
out = strings.ToLower(sha1)
func cleanSHA1(sha1 string) string {
const unverified = "unverified:"
if strings.HasPrefix(out, unverified) {
out = out[len(unverified):]
}
return out
return strings.TrimPrefix(strings.ToLower(sha1), unverified)
}
// decodeMetaDataRaw sets the metadata from the data passed in
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
//
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string, mimeType string) (err error) {
o.id = ID
o.sha1 = SHA1
@@ -1495,10 +1513,11 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
// decodeMetaData sets the metadata in the object from an api.File
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
//
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) decodeMetaData(info *api.File) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}
@@ -1506,10 +1525,11 @@ func (o *Object) decodeMetaData(info *api.File) (err error) {
// decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
//
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}
@@ -1567,10 +1587,11 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
//
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.id != "" {
return nil
@@ -1828,6 +1849,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if o.fs.opt.Versions {
return errNotWithVersions
}
if o.fs.opt.VersionAt.IsSet() {
return errNotWithVersionAt
}
size := src.Size()
bucket, bucketPath := o.split()
@@ -1983,6 +2007,9 @@ func (o *Object) Remove(ctx context.Context) error {
if o.fs.opt.Versions {
return errNotWithVersions
}
if o.fs.opt.VersionAt.IsSet() {
return errNotWithVersionAt
}
if o.fs.opt.HardDelete {
return o.fs.deleteByID(ctx, o.id, bucketPath)
}
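
The new --b2-version-at handling above lists file versions and keeps, for each file name, the newest version uploaded at or before the requested time. A stand-alone sketch of that selection is below; it assumes, as the skip logic in the listing code does, that versions arrive grouped by name with the newest first. The types and function name are illustrative only.

package main

import (
	"fmt"
	"time"
)

type fileVersion struct {
	Name     string
	Uploaded time.Time
}

// versionsAt keeps one version per file: the newest one uploaded at or before
// the cut-off. Versions newer than the cut-off are skipped, and once a name
// has been emitted its older versions are skipped too.
func versionsAt(versions []fileVersion, cutoff time.Time) []fileVersion {
	var out []fileVersion
	lastName := ""
	for _, v := range versions {
		if v.Uploaded.After(cutoff) {
			continue // created after the requested point in time
		}
		if v.Name == lastName {
			continue // an older version of a file already emitted
		}
		lastName = v.Name
		out = append(out, v)
	}
	return out
}

func main() {
	cutoff := time.Date(2022, 6, 1, 0, 0, 0, 0, time.UTC)
	versions := []fileVersion{
		{"a.txt", time.Date(2022, 7, 1, 0, 0, 0, 0, time.UTC)}, // too new, skipped
		{"a.txt", time.Date(2022, 5, 1, 0, 0, 0, 0, time.UTC)}, // kept
		{"a.txt", time.Date(2022, 4, 1, 0, 0, 0, 0, time.UTC)}, // older duplicate, skipped
	}
	fmt.Println(versionsAt(versions, cutoff))
}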


@@ -14,12 +14,15 @@ import (
"io"
"strings"
"sync"
"time"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
)
@@ -88,21 +91,19 @@ type largeUpload struct {
// newLargeUpload starts an upload of object o from in with metadata in src
//
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
remote := o.remote
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
size := src.Size()
parts := int64(0)
sha1SliceSize := int64(maxParts)
chunkSize := defaultChunkSize
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
if parts > maxParts {
return nil, fmt.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
sha1SliceSize = parts
}
@@ -429,18 +430,47 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
uploadPool *pool.Pool
ci = fs.GetConfig(ctx)
)
// If using large chunk size then make a temporary pool
if up.chunkSize <= int64(up.f.opt.ChunkSize) {
uploadPool = up.f.pool
} else {
uploadPool = pool.New(
time.Duration(up.f.opt.MemoryPoolFlushTime),
int(up.chunkSize),
ci.Transfers,
up.f.opt.MemoryPoolUseMmap,
)
defer uploadPool.Flush()
}
// Get an upload token and a buffer
getBuf := func() (buf []byte) {
up.f.getBuf(true)
if !up.doCopy {
buf = uploadPool.Get()
}
return buf
}
// Put an upload token and a buffer
putBuf := func(buf []byte) {
if !up.doCopy {
uploadPool.Put(buf)
}
up.f.putBuf(nil, true)
}
g.Go(func() error {
for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := up.f.getBuf(up.doCopy)
buf := getBuf()
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, up.doCopy)
putBuf(buf)
return nil
}
@@ -454,14 +484,14 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putBuf(buf, up.doCopy)
putBuf(buf)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, up.doCopy)
defer putBuf(buf)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
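
For reference, the parts arithmetic this upload rework keeps is a plain ceiling division, with chunksize.Calculator choosing a larger chunk size when the default would push past maxParts. A small self-contained sketch of the division itself, with an invented helper name:

package main

import "fmt"

// partsNeeded is the ceiling division used when sizing a large upload:
// a 10 GiB file uploaded in 96 MiB chunks needs 107 parts, for example.
func partsNeeded(size, chunkSize int64) int64 {
	parts := size / chunkSize
	if size%chunkSize != 0 {
		parts++
	}
	return parts
}

func main() {
	fmt.Println(partsNeeded(10<<30, 96<<20)) // 107
}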


@@ -14,7 +14,7 @@ const (
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents represents date and time information for the
// Time represents date and time information for the
// box API, by using RFC3339
type Time time.Time


@@ -17,9 +17,9 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
@@ -183,7 +183,7 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
}
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
file, err := os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}
@@ -266,7 +266,7 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
@@ -692,7 +692,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -752,7 +752,7 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -792,9 +792,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// PutUnchecked the object into the container
//
// This will produce an error if the object already exists
// This will produce an error if the object already exists.
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -877,9 +877,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -897,7 +897,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
if strings.EqualFold(srcPath, dstPath) {
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
@@ -995,9 +995,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1235,7 +1235,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1346,9 +1345,9 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.fs.tokenRenewer != nil {
o.fs.tokenRenewer.Start()
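
One small but easy-to-miss change in this file: the case-insensitive path comparison now uses strings.EqualFold instead of comparing two strings.ToLower results, which avoids allocating lowered copies and uses Unicode case folding. A quick illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// EqualFold compares under Unicode case folding without allocating new strings.
	fmt.Println(strings.EqualFold("Documents/Report.TXT", "documents/report.txt")) // true
}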


@@ -1,6 +1,7 @@
//go:build !plan9 && !js
// +build !plan9,!js
// Package cache implements a virtual provider to cache existing remotes.
package cache
import (
@@ -1037,7 +1038,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
fs.Debugf(dir, "list: remove entry: %v", entryRemote)
}
entries = nil
entries = nil //nolint:ineffassign
// and then iterate over the ones from source (temp Objects will override source ones)
var batchDirectories []*Directory
@@ -1128,7 +1129,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
case fs.Directory:
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
default:
return fmt.Errorf("Unknown object type %T", entry)
return fmt.Errorf("unknown object type %T", entry)
}
}
@@ -1747,7 +1748,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("About not supported")
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
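
The reworded error above sits in the usual optional-feature pattern: look the capability up on the wrapped remote and fail with a clear message when it is absent. A generic sketch of that pattern, with invented types rather than rclone's fs.Features:

package main

import (
	"errors"
	"fmt"
)

// features models an optional capability: a nil function means the wrapped
// remote does not implement it.
type features struct {
	About func() (string, error)
}

func describe(f features) (string, error) {
	if f.About == nil {
		return "", errors.New("not supported by underlying remote")
	}
	return f.About()
}

func main() {
	_, err := describe(features{})
	fmt.Println(err) // not supported by underlying remote
}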


@@ -11,7 +11,6 @@ import (
goflag "flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
@@ -102,14 +101,12 @@ func TestMain(m *testing.M) {
func TestInternalListRootAndInnerRemotes(t *testing.T) {
id := fmt.Sprintf("tilrair%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
// Instantiate inner fs
innerFolder := "inner"
runInstance.mkdir(t, rootFs, innerFolder)
rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs2, boltDb2)
rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)
runInstance.writeObjectString(t, rootFs2, "one", "content")
listRoot, err := runInstance.list(t, rootFs, "")
@@ -167,7 +164,7 @@ func TestInternalVfsCache(t *testing.T) {
li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
for _, r := range li2 {
var err error
ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
if err != nil || len(ci) == 0 {
log.Printf("========== '%v' not in cache", r)
} else {
@@ -226,8 +223,7 @@ func TestInternalVfsCache(t *testing.T) {
func TestInternalObjWrapFsFound(t *testing.T) {
id := fmt.Sprintf("tiowff%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -259,8 +255,7 @@ func TestInternalObjWrapFsFound(t *testing.T) {
func TestInternalObjNotFound(t *testing.T) {
id := fmt.Sprintf("tionf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
obj, err := rootFs.NewObject(context.Background(), "404")
require.Error(t, err)
@@ -270,8 +265,7 @@ func TestInternalObjNotFound(t *testing.T) {
func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -298,8 +292,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
// write the object
runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -317,8 +310,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticucm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
var err error
// create some rand test data
@@ -347,8 +339,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
}
@@ -378,8 +369,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
func TestInternalLargeWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
}
@@ -405,8 +395,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -460,8 +449,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
func TestInternalMoveWithNotify(t *testing.T) {
id := fmt.Sprintf("timwn%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
@@ -547,8 +535,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
id := fmt.Sprintf("tincep%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
@@ -634,8 +621,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -667,8 +653,7 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
func TestInternalCacheWrites(t *testing.T) {
id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -689,8 +674,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -725,8 +709,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
func TestInternalExpiredEntriesRemoved(t *testing.T) {
id := fmt.Sprintf("tieer%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -763,9 +746,7 @@ func TestInternalBug2117(t *testing.T) {
vfsflags.Opt.DirCacheTime = time.Second * 10
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
if runInstance.rootIsCrypt {
t.Skipf("skipping crypt")
@@ -841,7 +822,7 @@ func newRun() *run {
}
if uploadDir == "" {
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
if err != nil {
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
}
@@ -866,7 +847,7 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
return enc
}
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
remoteExists := false
for _, s := range config.FileSections() {
@@ -959,10 +940,15 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
t.Cleanup(func() {
runInstance.cleanupFs(t, f)
})
return f, boltDb
}
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
err := f.Features().Purge(context.Background(), "")
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
@@ -984,7 +970,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
chunk := int64(1024)
cnt := size / chunk
left := size % chunk
f, err := ioutil.TempFile("", "rclonecache-tempfile")
f, err := os.CreateTemp("", "rclonecache-tempfile")
require.NoError(t, err)
for i := 0; i < int(cnt); i++ {
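
The test changes above replace the per-call-site defer runInstance.cleanupFs(...) with a single t.Cleanup registration inside newCacheFs, so every caller gets teardown for free. A generic sketch of that pattern with invented names:

package fixture

import "testing"

type fixture struct{ closed bool }

func (f *fixture) close() { f.closed = true }

// newFixture creates a test fixture and registers its own teardown with
// t.Cleanup, so callers no longer need a matching defer.
func newFixture(t *testing.T) *fixture {
	t.Helper()
	f := &fixture{}
	t.Cleanup(f.close)
	return f
}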


@@ -19,7 +19,7 @@ func TestIntegration(t *testing.T) {
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})
}


@@ -21,10 +21,8 @@ import (
func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
runInstance.newCacheFs(t, remoteName, id, false, true,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
@@ -63,9 +61,7 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
@@ -73,19 +69,15 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err)
@@ -119,10 +111,8 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err)
@@ -162,10 +152,8 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test")
require.NoError(t, err)
@@ -213,9 +201,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
@@ -343,9 +329,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()


@@ -8,7 +8,7 @@ import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/url"
"strings"
@@ -167,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
continue
}
var data []byte
data, err = ioutil.ReadAll(resp.Body)
data, err = io.ReadAll(resp.Body)
if err != nil {
continue
}
@@ -213,7 +213,7 @@ func (p *plexConnector) authenticate() error {
var data map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil {
return fmt.Errorf("failed to obtain token: %v", err)
return fmt.Errorf("failed to obtain token: %w", err)
}
tokenGen, ok := get(data, "user", "authToken")
if !ok {
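
The %v to %w switch above is not cosmetic: wrapping with %w keeps the underlying error reachable through errors.Is and errors.As, while %v flattens it into plain text. A quick illustration:

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	wrapped := fmt.Errorf("failed to obtain token: %w", io.ErrUnexpectedEOF)
	fmt.Println(errors.Is(wrapped, io.ErrUnexpectedEOF)) // true

	flattened := fmt.Errorf("failed to obtain token: %v", io.ErrUnexpectedEOF)
	fmt.Println(errors.Is(flattened, io.ErrUnexpectedEOF)) // false
}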


@@ -76,10 +76,7 @@ func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {
// CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
func (m *Memory) CleanChunksByNeed(offset int64) {
var items map[string]cache.Item
items = m.db.Items()
for key := range items {
for key := range m.db.Items() {
sepIdx := strings.LastIndex(key, "-")
keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
if err != nil {


@@ -9,7 +9,6 @@ import (
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
@@ -250,7 +249,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
if val != nil {
err := json.Unmarshal(val, cachedDir)
if err != nil {
return fmt.Errorf("error during unmarshalling obj: %v", err)
return fmt.Errorf("error during unmarshalling obj: %w", err)
}
} else {
return fmt.Errorf("missing cached dir: %v", cachedDir)
@@ -456,10 +455,7 @@ func (b *Persistent) HasEntry(remote string) bool {
return fmt.Errorf("couldn't find object (%v)", remote)
})
if err == nil {
return true
}
return false
return err == nil
}
// HasChunk confirms the existence of a single chunk of an object
@@ -476,7 +472,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
var data []byte
fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
data, err := ioutil.ReadFile(fp)
data, err := os.ReadFile(fp)
if err != nil {
return nil, err
}
@@ -489,7 +485,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)
filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
err := ioutil.WriteFile(filePath, data, os.ModePerm)
err := os.WriteFile(filePath, data, os.ModePerm)
if err != nil {
return err
}
@@ -554,7 +550,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
err := b.db.Update(func(tx *bolt.Tx) error {
dataTsBucket := tx.Bucket([]byte(DataTsBucket))
if dataTsBucket == nil {
return fmt.Errorf("Couldn't open (%v) bucket", DataTsBucket)
return fmt.Errorf("couldn't open (%v) bucket", DataTsBucket)
}
// iterate through ts
c := dataTsBucket.Cursor()
@@ -904,16 +900,16 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
return fmt.Errorf("pending upload (%v) not found: %w", remote, err)
}
tempObj.Started = false
v2, err := json.Marshal(tempObj)
if err != nil {
return fmt.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated: %w", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return fmt.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated: %w", err)
}
return nil
})
@@ -969,11 +965,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
}
v2, err := json.Marshal(tempObj)
if err != nil {
return fmt.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated: %w", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return fmt.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated: %w", err)
}
return nil


@@ -12,7 +12,6 @@ import (
"fmt"
gohash "hash"
"io"
"io/ioutil"
"math/rand"
"path"
"regexp"
@@ -32,7 +31,6 @@ import (
"github.com/rclone/rclone/fs/operations"
)
//
// Chunker's composite files have one or more chunks
// and optional metadata object. If it's present,
// meta object is named after the original file.
@@ -65,7 +63,7 @@ import (
// length of 13 decimals it makes a 7-digit base-36 number.
//
// When transactions is set to the norename style, data chunks will
// keep their temporary chunk names (with the transacion identifier
// keep their temporary chunk names (with the transaction identifier
// suffix). To distinguish them from temporary chunks, the txn field
// of the metadata file is set to match the transaction identifier of
// the data chunks.
@@ -79,7 +77,6 @@ import (
// Metadata format v1 does not define any control chunk types,
// they are currently ignored aka reserved.
// In future they can be used to implement resumable uploads etc.
//
const (
ctrlTypeRegStr = `[a-z][a-z0-9]{2,6}`
tempSuffixFormat = `_%04s`
@@ -515,7 +512,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
strRegex := regexp.QuoteMeta(pattern)
strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1)
strRegex = strings.ReplaceAll(strRegex, "\\*", mainNameRegStr)
strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
f.nameRegexp = regexp.MustCompile(strRegex)
@@ -524,7 +521,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
if numDigits > 1 {
fmtDigits = fmt.Sprintf("%%0%dd", numDigits)
}
strFmt := strings.Replace(pattern, "%", "%%", -1)
strFmt := strings.ReplaceAll(pattern, "%", "%%")
strFmt = strings.Replace(strFmt, "*", "%s", 1)
f.dataNameFmt = reHashes.ReplaceAllLiteralString(strFmt, fmtDigits)
f.ctrlNameFmt = reHashes.ReplaceAllLiteralString(strFmt, "_%s")
@@ -542,7 +539,6 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
//
// xactID is a transaction identifier. Empty xactID denotes active chunk,
// otherwise temporary chunk name is produced.
//
func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
dir, parentName := path.Split(filePath)
var name, tempSuffix string
@@ -708,7 +704,6 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
// directory together with dead chunks.
// In future a flag named like `--chunker-list-hidden` may be added to
// rclone that will tell List to reveal hidden chunks.
//
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, err = f.base.List(ctx, dir)
if err != nil {
@@ -868,7 +863,6 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
// Note that chunker prefers analyzing file names rather than reading
// the content of meta object assuming that directory scans are fast
// but opening even a small file can be slow on some backends.
//
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.scanObject(ctx, remote, false)
}
@@ -1043,7 +1037,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
if err != nil {
return err
}
metadata, err := ioutil.ReadAll(reader)
metadata, err := io.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return err
@@ -1084,7 +1078,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
// readXactID returns the transaction ID stored in the passed metadata object
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// if xactID has already been read and cahced return it now
// if xactID has already been read and cached return it now
if o.xIDCached {
return o.xactID, nil
}
@@ -1102,7 +1096,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
if err != nil {
return "", err
}
data, err := ioutil.ReadAll(reader)
data, err := io.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return "", err
@@ -1586,7 +1580,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// This command will chain to `purge` from wrapped remote.
// As a result it removes not only composite chunker files with their
// active chunks but also all hidden temporary chunks in the directory.
//
func (f *Fs) Purge(ctx context.Context, dir string) error {
do := f.base.Features().Purge
if do == nil {
@@ -1628,7 +1621,6 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Unsupported control chunks will get re-picked by a more recent
// rclone version with unexpected results. This can be helped by
// the `delete hidden` flag above or at least the user has been warned.
//
func (o *Object) Remove(ctx context.Context) (err error) {
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
// operations.Move can still call Remove if chunker's Move refuses
@@ -1804,9 +1796,9 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1825,9 +1817,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1895,7 +1887,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.base.Features().CleanUp
if do == nil {
return errors.New("can't CleanUp")
return errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -1904,7 +1896,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.base.Features().About
if do == nil {
return nil, errors.New("About not supported")
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -2125,7 +2117,6 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
// file, then tries to read it from metadata. This in theory
// handles the unusual case when a small file has been tampered
// on the level of wrapped remote but chunker is unaware of that.
//
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
if err := o.readMetadata(ctx); err != nil {
return "", err // valid metadata is required to get hash, abort
@@ -2414,7 +2405,6 @@ type metaSimpleJSON struct {
// - for files larger than chunk size
// - if file contents can be mistaken as meta object
// - if consistent hashing is On but wrapped remote can't provide given hash
//
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
version := metadataVersion
if xactID == "" && version == 2 {
@@ -2447,7 +2437,6 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
// New format will have a higher version number and cannot be correctly
// handled by current implementation.
// The version check below will then explicitly ask user to upgrade rclone.
//
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
// Be strict about JSON format
// to reduce possibility that a random small file resembles metadata.
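
For readers new to chunker, the naming comments above describe composite names built from a configurable pattern: the original file name plus a zero-padded chunk number, with temporary chunks carrying a transaction suffix. The toy function below shows that shape only; it is not chunker's makeChunkName, and the real pattern, numbering and suffix format are configurable.

package main

import "fmt"

// chunkName builds names of the general shape described above, e.g.
// "video.mp4.rclone_chunk.003" for an active chunk and
// "video.mp4.rclone_chunk.003_a1b2" for a temporary one.
func chunkName(fileName string, chunkNo int, xactID string) string {
	name := fmt.Sprintf("%s.rclone_chunk.%03d", fileName, chunkNo)
	if xactID != "" {
		name += "_" + xactID
	}
	return name
}

func main() {
	fmt.Println(chunkName("video.mp4", 3, ""))
	fmt.Println(chunkName("video.mp4", 3, "a1b2"))
}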


@@ -5,7 +5,7 @@ import (
"context"
"flag"
"fmt"
"io/ioutil"
"io"
"path"
"regexp"
"strings"
@@ -59,7 +59,7 @@ var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: mtime1}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}
@@ -413,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
if r == nil {
return
}
data, err := ioutil.ReadAll(r)
data, err := io.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
@@ -440,7 +440,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
checkSmallFile := func(name, contents string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
assert.NotNil(t, put)
checkSmallFileInternals(put)
checkContents(put, contents)
@@ -489,7 +489,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
newFile := func(name string) fs.Object {
item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj
}
@@ -538,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
assert.NoError(t, err)
var chunkContents []byte
assert.NotPanics(t, func() {
chunkContents, err = ioutil.ReadAll(r)
chunkContents, err = io.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
@@ -573,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
r, err = willyChunk.Open(ctx)
assert.NoError(t, err)
assert.NotPanics(t, func() {
_, err = ioutil.ReadAll(r)
_, err = io.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
@@ -599,7 +599,7 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
filename = path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
txnID = chunkObj.xactID
@@ -672,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil {
data, err := ioutil.ReadAll(r)
data, err := io.ReadAll(r)
assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close()
@@ -716,7 +716,7 @@ func testFutureProof(t *testing.T, f *Fs) {
name = f.makeChunkName(name, part-1, "", "")
}
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
assert.NotNil(t, obj, msg)
}
@@ -758,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
assert.Error(t, err)
// Rcat must fail
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime)
in := io.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
assert.Nil(t, robj)
assert.NotNil(t, err)
if err != nil {
@@ -790,7 +790,7 @@ func testBackwardsCompatibility(t *testing.T, f *Fs) {
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
}
@@ -844,7 +844,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: "movefile", ModTime: modTime}
contents := "abcdef"
_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
@@ -854,7 +854,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
r, err := dstFile.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
data, err := ioutil.ReadAll(r)
data, err := io.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()


@@ -35,6 +35,7 @@ func TestIntegration(t *testing.T) {
"MimeType",
"GetTier",
"SetTier",
"Metadata",
},
UnimplementableFsMethods: []string{
"PublicLink",
@@ -53,6 +54,7 @@ func TestIntegration(t *testing.T) {
{Name: name, Key: "type", Value: "chunker"},
{Name: name, Key: "remote", Value: tempDir},
}
opt.QuickTestOK = true
}
fstests.Run(t, &opt)
}

backend/combine/combine.go (new file, 992 lines added)

@@ -0,0 +1,992 @@
// Package combine implements a backend to combine multiple remotes in a directory tree
package combine
/*
Have API to add/remove branches in the combine
*/
import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"golang.org/x/sync/errgroup"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "combine",
Description: "Combine several remotes into one",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "upstreams",
Help: `Upstreams for combining
These should be in the form
dir=remote:path dir2=remote2:path
Where before the = is specified the root directory and after is the remote to
put there.
Embedded spaces can be added using quotes
"dir=remote:path with space" "dir2=remote2:path with space"
`,
Required: true,
Default: fs.SpaceSepList(nil),
}},
}
fs.Register(fsi)
}
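// Editor's note: the sketch below is illustrative and not part of the original
// source. It shows how an upstreams value of the form "dir=remote:path
// dir2=remote2:path" splits into the (dir, remote) pairs that NewFs builds
// further down; the remote names used here are hypothetical.
func exampleSplitUpstreams() {
	upstreams := fs.SpaceSepList{"documents=drive:docs", "pictures=s3:bucket/pics"}
	for _, u := range upstreams {
		equal := strings.IndexRune(u, '=')
		if equal < 0 {
			fmt.Printf("no \"=\" in upstream definition %q\n", u)
			continue
		}
		// everything before the first "=" is the directory, the rest is the remote
		fmt.Printf("mount %q at directory %q\n", u[equal+1:], u[:equal])
	}
}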
// Options defines the configuration for this backend
type Options struct {
Upstreams fs.SpaceSepList `config:"upstreams"`
}
// Fs represents a combine of upstreams
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
root string // the path we are working on
hashSet hash.Set // common hashes
when time.Time // directory times
upstreams map[string]*upstream // map of upstreams
}
// adjustment stores the info to add a prefix to a path or chop characters off
type adjustment struct {
root string
rootSlash string
mountpoint string
mountpointSlash string
}
// newAdjustment makes a new path adjustment adjusting between mountpoint and root
//
// mountpoint is the point the upstream is mounted and root is the combine root
func newAdjustment(root, mountpoint string) (a adjustment) {
return adjustment{
root: root,
rootSlash: root + "/",
mountpoint: mountpoint,
mountpointSlash: mountpoint + "/",
}
}
var errNotUnderRoot = errors.New("file not under root")
// do makes the adjustment on s, mapping an upstream path into a combine path
func (a *adjustment) do(s string) (string, error) {
absPath := join(a.mountpoint, s)
if a.root == "" {
return absPath, nil
}
if absPath == a.root {
return "", nil
}
if !strings.HasPrefix(absPath, a.rootSlash) {
return "", errNotUnderRoot
}
return absPath[len(a.rootSlash):], nil
}
// undo makes the adjustment on s, mapping a combine path into an upstream path
func (a *adjustment) undo(s string) (string, error) {
absPath := join(a.root, s)
if absPath == a.mountpoint {
return "", nil
}
if !strings.HasPrefix(absPath, a.mountpointSlash) {
return "", errNotUnderRoot
}
return absPath[len(a.mountpointSlash):], nil
}
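// Editor's note: exampleAdjustment is an illustrative sketch, not part of the
// original source. With a combine root of "" and a hypothetical upstream mounted
// at "documents", do maps an upstream path out into the combine tree and undo
// maps it back.
func exampleAdjustment() {
	a := newAdjustment("", "documents")
	combinePath, _ := a.do("path/to/file.txt") // "documents/path/to/file.txt"
	upstreamPath, _ := a.undo(combinePath)     // "path/to/file.txt"
	fmt.Println(combinePath, upstreamPath)
}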
// upstream represents an upstream Fs
type upstream struct {
f fs.Fs
parent *Fs
dir string // directory the upstream is mounted
pathAdjustment adjustment // how to fiddle with the path
}
// Create an upstream from the directory it is mounted on and the remote
func (f *Fs) newUpstream(ctx context.Context, dir, remote string) (*upstream, error) {
uFs, err := cache.Get(ctx, remote)
if err == fs.ErrorIsFile {
return nil, fmt.Errorf("can't combine files yet, only directories %q: %w", remote, err)
}
if err != nil {
return nil, fmt.Errorf("failed to create upstream %q: %w", remote, err)
}
u := &upstream{
f: uFs,
parent: f,
dir: dir,
pathAdjustment: newAdjustment(f.root, dir),
}
cache.PinUntilFinalized(u.f, u)
return u, nil
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
// Parse config into Options struct
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Backward compatible to old config
if len(opt.Upstreams) == 0 {
return nil, errors.New("combine can't point to an empty upstream - check the value of the upstreams setting")
}
for _, u := range opt.Upstreams {
if strings.HasPrefix(u, name+":") {
return nil, errors.New("can't point combine remote at itself - check the value of the upstreams setting")
}
}
isDir := false
for strings.HasSuffix(root, "/") {
root = root[:len(root)-1]
isDir = true
}
f := &Fs{
name: name,
root: root,
opt: *opt,
upstreams: make(map[string]*upstream, len(opt.Upstreams)),
when: time.Now(),
}
g, gCtx := errgroup.WithContext(ctx)
var mu sync.Mutex
for _, upstream := range opt.Upstreams {
upstream := upstream
g.Go(func() (err error) {
equal := strings.IndexRune(upstream, '=')
if equal < 0 {
return fmt.Errorf("no \"=\" in upstream definition %q", upstream)
}
dir, remote := upstream[:equal], upstream[equal+1:]
if dir == "" {
return fmt.Errorf("empty dir in upstream definition %q", upstream)
}
if remote == "" {
return fmt.Errorf("empty remote in upstream definition %q", upstream)
}
if strings.ContainsRune(dir, '/') {
return fmt.Errorf("dirs can't contain / (yet): %q", dir)
}
u, err := f.newUpstream(gCtx, dir, remote)
if err != nil {
return err
}
mu.Lock()
if _, found := f.upstreams[dir]; found {
err = fmt.Errorf("duplicate directory name %q", dir)
} else {
f.upstreams[dir] = u
}
mu.Unlock()
return err
})
}
err = g.Wait()
if err != nil {
return nil, err
}
// check features
var features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f)
canMove := true
for _, u := range f.upstreams {
features = features.Mask(ctx, u.f) // Mask all upstream fs
if !operations.CanServerSideMove(u.f) {
canMove = false
}
}
// We can move if all remotes support Move or Copy
if canMove {
features.Move = f.Move
}
// Enable ListR when upstreams either support ListR or are local
// But not when all upstreams are local
if features.ListR == nil {
for _, u := range f.upstreams {
if u.f.Features().ListR != nil {
features.ListR = f.ListR
} else if !u.f.Features().IsLocal {
features.ListR = nil
break
}
}
}
// Enable Purge when any upstreams support it
if features.Purge == nil {
for _, u := range f.upstreams {
if u.f.Features().Purge != nil {
features.Purge = f.Purge
break
}
}
}
// Enable Shutdown when any upstreams support it
if features.Shutdown == nil {
for _, u := range f.upstreams {
if u.f.Features().Shutdown != nil {
features.Shutdown = f.Shutdown
break
}
}
}
// Enable DirCacheFlush when any upstreams support it
if features.DirCacheFlush == nil {
for _, u := range f.upstreams {
if u.f.Features().DirCacheFlush != nil {
features.DirCacheFlush = f.DirCacheFlush
break
}
}
}
// Enable ChangeNotify when any upstreams support it
if features.ChangeNotify == nil {
for _, u := range f.upstreams {
if u.f.Features().ChangeNotify != nil {
features.ChangeNotify = f.ChangeNotify
break
}
}
}
f.features = features
// Get common intersection of hashes
var hashSet hash.Set
var first = true
for _, u := range f.upstreams {
if first {
hashSet = u.f.Hashes()
first = false
} else {
hashSet = hashSet.Overlap(u.f.Hashes())
}
}
f.hashSet = hashSet
// Check to see if the root is actually a file
if f.root != "" && !isDir {
_, err := f.NewObject(ctx, "")
if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile || err == fs.ErrorIsDir {
// File doesn't exist or is a directory so return old f
return f, nil
}
return nil, err
}
// Check to see if the root path is actually an existing file
f.root = path.Dir(f.root)
if f.root == "." {
f.root = ""
}
// Adjust path adjustment to remove leaf
for _, u := range f.upstreams {
u.pathAdjustment = newAdjustment(f.root, u.dir)
}
return f, fs.ErrorIsFile
}
return f, nil
}
// Run a function over all the upstreams in parallel
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
g, gCtx := errgroup.WithContext(ctx)
for _, u := range f.upstreams {
u := u
g.Go(func() (err error) {
return fn(gCtx, u)
})
}
return g.Wait()
}
// join the elements together but unlike path.Join return an empty string instead of "."
func join(elem ...string) string {
result := path.Join(elem...)
if result == "." {
return ""
}
if len(result) > 0 && result[0] == '/' {
result = result[1:]
}
return result
}
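// Editor's note (not in the original source): compared with path.Join this means
// join("a", "..") returns "" rather than "." and join("/", "a") returns "a"
// rather than "/a", so adjusted paths never come back as "." or with a leading
// slash.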
// find the upstream for the remote passed in, returning the upstream and the adjusted path
func (f *Fs) findUpstream(remote string) (u *upstream, uRemote string, err error) {
// defer log.Trace(remote, "")("f=%v, uRemote=%q, err=%v", &u, &uRemote, &err)
for _, u := range f.upstreams {
uRemote, err = u.pathAdjustment.undo(remote)
if err == nil {
return u, uRemote, nil
}
}
return nil, "", fmt.Errorf("combine for remote %q: %w", remote, fs.ErrorDirNotFound)
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("combine root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// The root always exists
if f.root == "" && dir == "" {
return nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
return u.f.Rmdir(ctx, uRemote)
}
// Hashes returns the hash types supported by all the upstreams
func (f *Fs) Hashes() hash.Set {
return f.hashSet
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// The root always exists
if f.root == "" && dir == "" {
return nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
return u.f.Mkdir(ctx, uRemote)
}
// purge the upstream or fallback to a slow way
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
if do := u.f.Features().Purge; do != nil {
err = do(ctx, dir)
} else {
err = operations.Purge(ctx, u.f, dir)
}
return err
}
// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
if f.root == "" && dir == "" {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
return u.purge(ctx, "")
})
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
return u.purge(ctx, uRemote)
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
dstU, dstRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := dstU.f.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
o, err := do(ctx, srcObj.Object, dstRemote)
if err != nil {
return nil, err
}
return dstU.newObject(o), nil
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
dstU, dstRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := dstU.f.Features().Move
useCopy := false
if do == nil {
do = dstU.f.Features().Copy
if do == nil {
return nil, fs.ErrorCantMove
}
useCopy = true
}
o, err := do(ctx, srcObj.Object, dstRemote)
if err != nil {
return nil, err
}
// If did Copy then remove the source object
if useCopy {
err = srcObj.Remove(ctx)
if err != nil {
return nil, err
}
}
return dstU.newObject(o), nil
}
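// Editor's note (not in the original source): when the destination upstream only
// implements Copy, the fallback above still gives Move semantics by copying and
// then removing the source object, which is why NewFs only enables f.Move when
// every upstream supports either Move or Copy.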
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
// defer log.Trace(f, "src=%v, srcRemote=%q, dstRemote=%q", src, srcRemote, dstRemote)("err=%v", &err)
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(src, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
dstU, dstURemote, err := f.findUpstream(dstRemote)
if err != nil {
return err
}
srcU, srcURemote, err := srcFs.findUpstream(srcRemote)
if err != nil {
return err
}
do := dstU.f.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
fs.Logf(dstU.f, "srcU.f=%v, srcURemote=%q, dstURemote=%q", srcU.f, srcURemote, dstURemote)
return do(ctx, srcU.f, srcURemote, dstURemote)
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
var uChans []chan time.Duration
for _, u := range f.upstreams {
u := u
if do := u.f.Features().ChangeNotify; do != nil {
ch := make(chan time.Duration)
uChans = append(uChans, ch)
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
newPath, err := u.pathAdjustment.do(path)
if err != nil {
fs.Logf(f, "ChangeNotify: unable to process %q: %s", path, err)
return
}
fs.Debugf(f, "ChangeNotify: path %q entryType %d", newPath, entryType)
notifyFunc(newPath, entryType)
}
do(ctx, wrappedNotifyFunc, ch)
}
}
go func() {
for i := range ch {
for _, c := range uChans {
c <- i
}
}
for _, c := range uChans {
close(c)
}
}()
}
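// Editor's note (not in the original source): the goroutine above fans the poll
// interval channel out to one channel per upstream that supports ChangeNotify,
// and closes those channels when the caller closes ch so the upstream pollers
// stop together.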
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
ctx := context.Background()
_ = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().DirCacheFlush; do != nil {
do()
}
return nil
})
}
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
u, uRemote, err := f.findUpstream(srcPath)
if err != nil {
return nil, err
}
uSrc := fs.NewOverrideRemote(src, uRemote)
var o fs.Object
if stream {
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
} else {
o, err = u.f.Put(ctx, in, uSrc, options...)
}
if err != nil {
return nil, err
}
return u.newObject(o), nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, false, options...)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, true, options...)
default:
return nil, err
}
}
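// Editor's note (not in the original source): Put and PutStream above first look
// the object up with NewObject; an existing object is updated in place, and only
// fs.ErrorObjectNotFound falls through to creating a new object in the relevant
// upstream via put.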
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
usage := &fs.Usage{
Total: new(int64),
Used: new(int64),
Trashed: new(int64),
Other: new(int64),
Free: new(int64),
Objects: new(int64),
}
for _, u := range f.upstreams {
doAbout := u.f.Features().About
if doAbout == nil {
continue
}
usg, err := doAbout(ctx)
if errors.Is(err, fs.ErrorDirNotFound) {
continue
}
if err != nil {
return nil, err
}
if usg.Total != nil && usage.Total != nil {
*usage.Total += *usg.Total
} else {
usage.Total = nil
}
if usg.Used != nil && usage.Used != nil {
*usage.Used += *usg.Used
} else {
usage.Used = nil
}
if usg.Trashed != nil && usage.Trashed != nil {
*usage.Trashed += *usg.Trashed
} else {
usage.Trashed = nil
}
if usg.Other != nil && usage.Other != nil {
*usage.Other += *usg.Other
} else {
usage.Other = nil
}
if usg.Free != nil && usage.Free != nil {
*usage.Free += *usg.Free
} else {
usage.Free = nil
}
if usg.Objects != nil && usage.Objects != nil {
*usage.Objects += *usg.Objects
} else {
usage.Objects = nil
}
}
return usage, nil
}
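// Editor's note (not in the original source): the nil checks above mean each
// usage field is only totalled while every upstream reports it; as soon as one
// upstream's About leaves, say, Free unset, the combined Free becomes nil
// (unknown) rather than a misleading partial sum.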
// Wraps entries for this upstream
func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.DirEntries, error) {
for i, entry := range entries {
switch x := entry.(type) {
case fs.Object:
entries[i] = u.newObject(x)
case fs.Directory:
newDir := fs.NewDirCopy(ctx, x)
newPath, err := u.pathAdjustment.do(newDir.Remote())
if err != nil {
return nil, err
}
newDir.SetRemote(newPath)
entries[i] = newDir
default:
return nil, fmt.Errorf("unknown entry type %T", entry)
}
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
if f.root == "" && dir == "" {
entries = make(fs.DirEntries, 0, len(f.upstreams))
for combineDir := range f.upstreams {
d := fs.NewDir(combineDir, f.when)
entries = append(entries, d)
}
return entries, nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return nil, err
}
entries, err = u.f.List(ctx, uRemote)
if err != nil {
return nil, err
}
return u.wrapEntries(ctx, entries)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
// defer log.Trace(f, "dir=%q, callback=%v", dir, callback)("err=%v", &err)
if f.root == "" && dir == "" {
rootEntries, err := f.List(ctx, "")
if err != nil {
return err
}
err = callback(rootEntries)
if err != nil {
return err
}
var mu sync.Mutex
syncCallback := func(entries fs.DirEntries) error {
mu.Lock()
defer mu.Unlock()
return callback(entries)
}
err = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
return f.ListR(ctx, u.dir, syncCallback)
})
if err != nil {
return err
}
return nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
wrapCallback := func(entries fs.DirEntries) error {
entries, err := u.wrapEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
}
if do := u.f.Features().ListR; do != nil {
err = do(ctx, uRemote, wrapCallback)
} else {
err = walk.ListR(ctx, u.f, uRemote, true, -1, walk.ListAll, wrapCallback)
}
if err == fs.ErrorDirNotFound {
err = nil
}
return err
}
// NewObject creates a new remote combine file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
if uRemote == "" || strings.HasSuffix(uRemote, "/") {
return nil, fs.ErrorIsDir
}
o, err := u.f.NewObject(ctx, uRemote)
if err != nil {
return nil, err
}
return u.newObject(o), nil
}
// Precision is the greatest Precision of all upstreams
func (f *Fs) Precision() time.Duration {
var greatestPrecision time.Duration
for _, u := range f.upstreams {
uPrecision := u.f.Precision()
if uPrecision > greatestPrecision {
greatestPrecision = uPrecision
}
}
return greatestPrecision
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().Shutdown; do != nil {
return do(ctx)
}
return nil
})
}
// Object describes a wrapped Object
//
// This is a wrapped Object which knows its path prefix
type Object struct {
fs.Object
u *upstream
}
func (u *upstream) newObject(o fs.Object) *Object {
return &Object{
Object: o,
u: u,
}
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.u.parent
}
// String returns the remote path
func (o *Object) String() string {
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
newPath, err := o.u.pathAdjustment.do(o.Object.String())
if err != nil {
fs.Errorf(o, "Bad object: %v", err)
return err.Error()
}
return newPath
}
// MimeType returns the content type of the Object if known
func (o *Object) MimeType(ctx context.Context) (mimeType string) {
if do, ok := o.Object.(fs.MimeTyper); ok {
mimeType = do.MimeType(ctx)
}
return mimeType
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
return o.Object
}
// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
do, ok := o.Object.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.Object.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
do, ok := o.Object.(fs.SetTierer)
if !ok {
return errors.New("underlying remote does not support SetTier")
}
return do.SetTier(tier)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil)
)


@@ -0,0 +1,94 @@
package combine
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestAdjustmentDo(t *testing.T) {
for _, test := range []struct {
root string
mountpoint string
in string
want string
wantErr error
}{
{
root: "",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "mountpoint/path/to/file.txt",
},
{
root: "mountpoint",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "wrongpath/to/file.txt",
want: "",
wantErr: errNotUnderRoot,
},
} {
what := fmt.Sprintf("%+v", test)
a := newAdjustment(test.root, test.mountpoint)
got, gotErr := a.do(test.in)
assert.Equal(t, test.wantErr, gotErr)
assert.Equal(t, test.want, got, what)
}
}
func TestAdjustmentUndo(t *testing.T) {
for _, test := range []struct {
root string
mountpoint string
in string
want string
wantErr error
}{
{
root: "",
mountpoint: "mountpoint",
in: "mountpoint/path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "to/file.txt",
want: "path/to/file.txt",
},
{
root: "wrongmountpoint/path",
mountpoint: "mountpoint",
in: "to/file.txt",
want: "",
wantErr: errNotUnderRoot,
},
} {
what := fmt.Sprintf("%+v", test)
a := newAdjustment(test.root, test.mountpoint)
got, gotErr := a.undo(test.in)
assert.Equal(t, test.wantErr, gotErr)
assert.Equal(t, test.want, got, what)
}
}


@@ -0,0 +1,81 @@
// Test Combine filesystem interface
package combine_test
import (
"testing"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/memory"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
func TestLocal(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := MakeTestDirs(t, 3)
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=" + dirs[2]
name := "TestCombineLocal"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
})
}
func TestMemory(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
upstreams := "dir1=:memory:dir1 dir2=:memory:dir2 dir3=:memory:dir3"
name := "TestCombineMemory"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
})
}
func TestMixed(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := MakeTestDirs(t, 2)
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=:memory:dir3"
name := "TestCombineMixed"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
})
}
// MakeTestDirs makes directories in /tmp for testing
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
for i := 1; i <= n; i++ {
dir := t.TempDir()
dirs = append(dirs, dir)
}
return dirs
}


@@ -13,7 +13,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
"strings"
@@ -29,6 +28,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
)
@@ -53,7 +53,7 @@ const (
Gzip = 2
)
var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9-_]{11})$")
var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
// Register with Fs
func init() {
@@ -70,6 +70,9 @@ func init() {
Name: "compress",
Description: "Compress a remote",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "remote",
Help: "Remote to compress.",
@@ -87,7 +90,7 @@ Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.
Level -2 uses Huffmann encoding only. Only use if you know what you
Level -2 uses Huffman encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.`,
Default: sgzip.DefaultCompression,
@@ -127,7 +130,7 @@ type Fs struct {
features *fs.Features // optional features
}
// NewFs contstructs an Fs from the path, container:path
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -180,6 +183,9 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
SetTier: true,
BucketBased: true,
CanHaveEmptyDirectories: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true
@@ -222,7 +228,7 @@ func processFileName(compressedFileName string) (origFileName string, extension
// Separate the filename and size from the extension
extensionPos := strings.LastIndex(compressedFileName, ".")
if extensionPos == -1 {
return "", "", 0, errors.New("File name has no extension")
return "", "", 0, errors.New("file name has no extension")
}
extension = compressedFileName[extensionPos:]
nameWithSize := compressedFileName[:extensionPos]
@@ -231,11 +237,11 @@ func processFileName(compressedFileName string) (origFileName string, extension
}
match := nameRegexp.FindStringSubmatch(nameWithSize)
if match == nil || len(match) != 3 {
return "", "", 0, errors.New("Invalid filename")
return "", "", 0, errors.New("invalid filename")
}
size, err := base64ToInt64(match[2])
if err != nil {
return "", "", 0, errors.New("Could not decode size")
return "", "", 0, errors.New("could not decode size")
}
return match[1], gzFileExt, size, nil
}
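// Editor's illustration (not in the original source): a compressed data object is
// named as the original name plus a dot, an 11 character size token matching
// nameRegexp above, and the extension. For a hypothetical "report.txt.AAAAAAAAAAA.gz",
// processFileName strips ".gz", then the regexp yields origFileName "report.txt"
// and the 11 character token that base64ToInt64 decodes into the uncompressed size.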
@@ -304,7 +310,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
case fs.Directory:
f.addDir(&newEntries, x)
default:
return nil, fmt.Errorf("Unknown object type %T", entry)
return nil, fmt.Errorf("unknown object type %T", entry)
}
}
return newEntries, nil
@@ -361,13 +367,16 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil {
return nil, err
}
meta := readMetadata(ctx, mo)
if meta == nil {
return nil, errors.New("error decoding metadata")
meta, err := readMetadata(ctx, mo)
if err != nil {
return nil, fmt.Errorf("error decoding metadata: %w", err)
}
// Create our Object
o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
return f.newObject(o, mo, meta), err
if err != nil {
return nil, err
}
return f.newObject(o, mo, meta), nil
}
// checkCompressAndType checks if an object is compressible and determines its mime type
@@ -445,7 +454,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
}
// Need to include what we allready read
// Need to include what we already read
in = &ReadCloserWrapper{
Reader: io.MultiReader(bytes.NewReader(buf), in),
Closer: in,
@@ -458,7 +467,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
}
fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
tempFile, err := ioutil.TempFile("", "rclone-press-")
tempFile, err := os.CreateTemp("", "rclone-press-")
defer func() {
// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
// to ignore them
@@ -466,10 +475,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
_ = os.Remove(tempFile.Name())
}()
if err != nil {
return nil, fmt.Errorf("Failed to create temporary local FS to spool file: %w", err)
return nil, fmt.Errorf("failed to create temporary local FS to spool file: %w", err)
}
if _, err = io.Copy(tempFile, in); err != nil {
return nil, fmt.Errorf("Failed to write temporary local file: %w", err)
return nil, fmt.Errorf("failed to write temporary local file: %w", err)
}
if _, err = tempFile.Seek(0, 0); err != nil {
return nil, err
@@ -536,8 +545,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
}
// Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
if err != nil {
if o != nil {
removeErr := o.Remove(ctx)
@@ -671,7 +680,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
}
return nil, err
}
return f.newObject(dataObject, mo, meta), err
return f.newObject(dataObject, mo, meta), nil
}
// Put in to the remote path with the modTime given of the given size
@@ -720,23 +729,23 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
err = oldObj.(*Object).Object.Remove(ctx)
if err != nil {
return nil, fmt.Errorf("Could remove original object: %w", err)
return nil, fmt.Errorf("couldn't remove original object: %w", err)
}
}
// If our new object is compressed we have to rename it with the correct size.
// Uncompressed objects don't store the size in the name so we they'll allready have the correct name.
// Uncompressed objects don't store the size in the name so they'll already have the correct name.
if compressible {
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
if err != nil {
return nil, fmt.Errorf("Couldn't rename streamed Object.: %w", err)
return nil, fmt.Errorf("couldn't rename streamed object: %w", err)
}
newObj.Object = wrapObj
}
return newObj, nil
}
// Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// will break stuff. Right now I can't think of a way to make this work.
// PutUnchecked uploads the object
@@ -779,9 +788,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -829,9 +838,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -906,7 +915,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.Fs.Features().CleanUp
if do == nil {
return errors.New("can't CleanUp: not supported by underlying remote")
return errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -915,7 +924,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("can't About: not supported by underlying remote")
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -1034,24 +1043,19 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
}
// This function will read the metadata from a metadata object.
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
// Open our metadata object
rc, err := mo.Open(ctx)
if err != nil {
return nil
return nil, err
}
defer func() {
err := rc.Close()
if err != nil {
fs.Errorf(mo, "Error closing object: %v", err)
}
}()
defer fs.CheckClose(rc, &err)
jr := json.NewDecoder(rc)
meta = new(ObjectMetadata)
if err = jr.Decode(meta); err != nil {
return nil
return nil, err
}
return meta
return meta, nil
}
// Remove removes this object
@@ -1096,6 +1100,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
origName := o.Remote()
if o.meta.Mode != Uncompressed || compressible {
newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
if err != nil {
return err
}
if newObject.Object.Remote() != o.Object.Remote() {
if removeErr := o.Object.Remove(ctx); removeErr != nil {
return removeErr
@@ -1109,9 +1116,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// If we are, just update the object and metadata
newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
}
if err != nil {
return err
if err != nil {
return err
}
}
// Update object metadata and return
o.Object = newObject.Object
@@ -1122,6 +1129,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
if o == nil {
log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
}
return &Object{
Object: o,
f: f,
@@ -1134,6 +1144,9 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object
// This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
if o == nil {
log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
}
return &Object{
Object: o,
f: f,
@@ -1161,7 +1174,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
return err
}
if o.meta == nil {
o.meta = readMetadata(ctx, o.mo)
o.meta, err = readMetadata(ctx, o.mo)
}
return err
}
@@ -1214,6 +1227,21 @@ func (o *Object) MimeType(ctx context.Context) string {
return o.meta.MimeType
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
err := o.loadMetadataIfNotLoaded(ctx)
if err != nil {
return nil, err
}
do, ok := o.mo.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
@@ -1360,6 +1388,51 @@ func (o *ObjectInfo) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", nil // cannot know the checksum
}
// ID returns the ID of the Object if known, or "" if not
func (o *ObjectInfo) ID() string {
do, ok := o.src.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// MimeType returns the content type of the Object if
// known, or "" if not
func (o *ObjectInfo) MimeType(ctx context.Context) string {
do, ok := o.src.(fs.MimeTyper)
if !ok {
return ""
}
return do.MimeType(ctx)
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *ObjectInfo) UnWrap() fs.Object {
return fs.UnWrapObjectInfo(o.src)
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.src.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// GetTier returns storage tier or class of the Object
func (o *ObjectInfo) GetTier() string {
do, ok := o.src.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
@@ -1412,11 +1485,6 @@ var (
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.GetTierer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.FullObjectInfo = (*ObjectInfo)(nil)
_ fs.FullObject = (*Object)(nil)
)


@@ -61,5 +61,6 @@ func TestRemoteGzip(t *testing.T) {
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
QuickTestOK: true,
})
}


@@ -96,7 +96,7 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
case "obfuscate":
mode = NameEncryptionObfuscated
default:
err = fmt.Errorf("Unknown file name encryption mode %q", s)
err = fmt.Errorf("unknown file name encryption mode %q", s)
}
return mode, err
}
@@ -127,11 +127,11 @@ type fileNameEncoding interface {
// RFC4648
//
// The standard encoding is modified in two ways
// * it becomes lower case (no-one likes upper case filenames!)
// * we strip the padding character `=`
// - it becomes lower case (no-one likes upper case filenames!)
// - we strip the padding character `=`
type caseInsensitiveBase32Encoding struct{}
// EncodeToString encodes a strign using the modified version of
// EncodeToString encodes a string using the modified version of
// base32 encoding.
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
encoded := base32.HexEncoding.EncodeToString(src)
@@ -162,7 +162,7 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
case "base32768":
enc = base32768.SafeEncoding
default:
err = fmt.Errorf("Unknown file name encoding mode %q", s)
err = fmt.Errorf("unknown file name encoding mode %q", s)
}
return enc, err
}
@@ -244,7 +244,7 @@ func (c *Cipher) putBlock(buf []byte) {
// encryptSegment encrypts a path segment
//
// This uses EME with AES
// This uses EME with AES.
//
// EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the
// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
@@ -254,8 +254,8 @@ func (c *Cipher) putBlock(buf []byte) {
// same filename must encrypt to the same thing.
//
// This means that
// * filenames with the same name will encrypt the same
// * filenames which start the same won't have a common prefix
// - filenames with the same name will encrypt the same
// - filenames which start the same won't have a common prefix
func (c *Cipher) encryptSegment(plaintext string) string {
if plaintext == "" {
return ""
@@ -1085,7 +1085,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// DecryptDataSeek decrypts the data stream from offset
//
// The open function must return a ReadCloser opened to the offset supplied
// The open function must return a ReadCloser opened to the offset supplied.
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {


@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
@@ -1073,7 +1072,7 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
source := newRandomSource(copySize)
encrypted, err := c.newEncrypter(source, nil)
assert.NoError(t, err)
decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
decrypted, err := c.newDecrypter(io.NopCloser(encrypted))
assert.NoError(t, err)
sink := newRandomSource(copySize)
n, err := io.CopyBuffer(sink, decrypted, buf)
@@ -1144,15 +1143,15 @@ func TestEncryptData(t *testing.T) {
buf := bytes.NewBuffer(test.in)
encrypted, err := c.EncryptData(buf)
assert.NoError(t, err)
out, err := ioutil.ReadAll(encrypted)
out, err := io.ReadAll(encrypted)
assert.NoError(t, err)
assert.Equal(t, test.expected, out)
// Check we can decode the data properly too...
buf = bytes.NewBuffer(out)
decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
decrypted, err := c.DecryptData(io.NopCloser(buf))
assert.NoError(t, err)
out, err = ioutil.ReadAll(decrypted)
out, err = io.ReadAll(decrypted)
assert.NoError(t, err)
assert.Equal(t, test.in, out)
}
@@ -1187,7 +1186,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
n, err := io.CopyN(io.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n)
}
@@ -1257,12 +1256,12 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16)
in := ioutil.NopCloser(io.MultiReader(in1, in2))
in := io.NopCloser(io.MultiReader(in1, in2))
fh, err := c.newDecrypter(in)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
n, err := io.CopyN(io.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n)
}
@@ -1274,14 +1273,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
// Make random data
const dataSize = 150000
plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
plaintext, err := io.ReadAll(newRandomSource(dataSize))
assert.NoError(t, err)
// Encrypt the data
buf := bytes.NewBuffer(plaintext)
encrypted, err := c.EncryptData(buf)
assert.NoError(t, err)
ciphertext, err := ioutil.ReadAll(encrypted)
ciphertext, err := io.ReadAll(encrypted)
assert.NoError(t, err)
trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
@@ -1300,7 +1299,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
end = len(ciphertext)
}
}
reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil
}
@@ -1490,7 +1489,7 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err, what)
continue
}
_, err = ioutil.ReadAll(fh)
_, err = io.ReadAll(fh)
var expectedErr error
switch {
case i == fileHeaderSize:
@@ -1514,7 +1513,7 @@ func TestDecrypterRead(t *testing.T) {
cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd)
assert.NoError(t, err)
_, err = ioutil.ReadAll(fh)
_, err = io.ReadAll(fh)
assert.Error(t, err, "potato")
assert.Equal(t, 0, cd.closed)
@@ -1524,13 +1523,13 @@ func TestDecrypterRead(t *testing.T) {
copy(file16copy, file16)
for i := range file16copy {
file16copy[i] ^= 0xFF
fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
if i < fileMagicSize {
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.Nil(t, fh)
} else {
assert.NoError(t, err)
_, err = ioutil.ReadAll(fh)
_, err = io.ReadAll(fh)
assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
}
file16copy[i] ^= 0xFF
@@ -1565,7 +1564,7 @@ func TestDecrypterClose(t *testing.T) {
assert.Equal(t, 0, cd.closed)
// close after reading
out, err := ioutil.ReadAll(fh)
out, err := io.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, []byte{1}, out)
assert.Equal(t, io.EOF, fh.err)


@@ -28,6 +28,9 @@ func init() {
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
CommandHelp: commandHelp,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
@@ -122,7 +125,7 @@ names, or for debugging purposes.`,
This option could help with shortening the encrypted filename. The
suitable option would depend on the way your remote counts the filename
length and if it's case sensitve.`,
length and if it's case sensitive.`,
Default: "base32",
Examples: []fs.OptionExample{
{
@@ -232,7 +235,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false,
@@ -241,6 +244,9 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
SetTier: true,
GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, err
@@ -328,7 +334,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
case fs.Directory:
f.addDir(ctx, &newEntries, x)
default:
return nil, fmt.Errorf("Unknown object type %T", entry)
return nil, fmt.Errorf("unknown object type %T", entry)
}
}
return newEntries, nil
@@ -390,6 +396,8 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
ci := fs.GetConfig(ctx)
if f.opt.NoDataEncryption {
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
if err == nil && o != nil {
@@ -407,6 +415,9 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// Find a hash the destination supports to compute a hash of
// the encrypted data
ht := f.Fs.Hashes().GetOne()
if ci.IgnoreChecksum {
ht = hash.None
}
var hasher *hash.MultiHasher
if ht != hash.None {
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -501,9 +512,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -526,9 +537,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -597,7 +608,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.Fs.Features().CleanUp
if do == nil {
return errors.New("can't CleanUp")
return errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -606,7 +617,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("About not supported")
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -1041,10 +1052,11 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
// Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Otherwise likely is an operations.OverrideRemote
} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
// Unwrap if it is an operations.OverrideRemote
srcObj = do.UnWrap()
} else {
// Otherwise don't unwrap any further
return "", nil
}
// if this is wrapping a local object then we work out the hash
@@ -1056,6 +1068,50 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
return "", nil
}
// GetTier returns storage tier or class of the Object
func (o *ObjectInfo) GetTier() string {
do, ok := o.ObjectInfo.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// ID returns the ID of the Object if known, or "" if not
func (o *ObjectInfo) ID() string {
do, ok := o.ObjectInfo.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.ObjectInfo.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// MimeType returns the content type of the Object if
// known, or "" if not
//
// This is deliberately unsupported so we don't leak mime type info by
// default.
func (o *ObjectInfo) MimeType(ctx context.Context) string {
return ""
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *ObjectInfo) UnWrap() fs.Object {
return fs.UnWrapObjectInfo(o.ObjectInfo)
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
@@ -1084,6 +1140,26 @@ func (o *Object) GetTier() string {
return do.GetTier()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.Object.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// MimeType returns the content type of the Object if
// known, or "" if not
//
// This is deliberately unsupported so we don't leak mime type info by
// default.
func (o *Object) MimeType(ctx context.Context) string {
return ""
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1106,10 +1182,6 @@ var (
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
_ fs.FullObjectInfo = (*ObjectInfo)(nil)
_ fs.FullObject = (*Object)(nil)
)


@@ -17,41 +17,28 @@ import (
"github.com/stretchr/testify/require"
)
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
localFs, err := fs.TemporaryLocalFs(context.Background())
require.NoError(t, err)
cleanup = func() {
t.Cleanup(func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
}
return localFs, cleanup
})
return localFs
}
// Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err)
cleanup = func() {
t.Cleanup(func() {
require.NoError(t, obj.Remove(context.Background()))
}
return obj, cleanup
})
return obj
}
// Test the ObjectInfo
@@ -65,11 +52,9 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
path = "_wrap"
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
localFs := makeTempLocalFs(t)
obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
obj := uploadFile(t, localFs, path, contents)
// encrypt the data
inBuf := bytes.NewBufferString(contents)
@@ -83,7 +68,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var oi fs.ObjectInfo = obj
if wrap {
// wrap the object in an fs.ObjectUnwrapper if required
oi = testWrapper{oi}
oi = fs.NewOverrideRemote(oi, "new_remote")
}
// wrap the object in a crypt for upload using the nonce we
@@ -91,7 +76,9 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
src := f.newObjectInfo(oi, nonce)
// Test ObjectInfo methods
assert.Equal(t, int64(outBuf.Len()), src.Size())
if !f.opt.NoDataEncryption {
assert.Equal(t, int64(outBuf.Len()), src.Size())
}
assert.Equal(t, f, src.Fs())
assert.NotEqual(t, path, src.Remote())
@@ -114,16 +101,13 @@ func testComputeHash(t *testing.T, f *Fs) {
t.Skipf("%v: does not support hashes", f.Fs)
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
localFs := makeTempLocalFs(t)
// Upload a file to localFs as a test object
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
localObj := uploadFile(t, localFs, path, contents)
// Upload the same data to the remote Fs also
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
remoteObj := uploadFile(t, f, path, contents)
// Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)


@@ -4,6 +4,7 @@ package crypt_test
import (
"os"
"path/filepath"
"runtime"
"testing"
"github.com/rclone/rclone/backend/crypt"
@@ -46,6 +47,7 @@ func TestStandardBase32(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -67,6 +69,7 @@ func TestStandardBase64(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -88,6 +91,7 @@ func TestStandardBase32768(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -109,6 +113,7 @@ func TestOff(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -117,6 +122,9 @@ func TestObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
if runtime.GOOS == "darwin" {
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt3"
fstests.Run(t, &fstests.Opt{
@@ -131,6 +139,7 @@ func TestObfuscate(t *testing.T) {
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -139,6 +148,9 @@ func TestNoDataObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
if runtime.GOOS == "darwin" {
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt4"
fstests.Run(t, &fstests.Opt{
@@ -154,5 +166,6 @@ func TestNoDataObfuscate(t *testing.T) {
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}


@@ -8,11 +8,11 @@ import "errors"
// Errors Unpad can return
var (
ErrorPaddingNotFound = errors.New("Bad PKCS#7 padding - not padded")
ErrorPaddingNotAMultiple = errors.New("Bad PKCS#7 padding - not a multiple of blocksize")
ErrorPaddingTooLong = errors.New("Bad PKCS#7 padding - too long")
ErrorPaddingTooShort = errors.New("Bad PKCS#7 padding - too short")
ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same")
ErrorPaddingNotFound = errors.New("bad PKCS#7 padding - not padded")
ErrorPaddingNotAMultiple = errors.New("bad PKCS#7 padding - not a multiple of blocksize")
ErrorPaddingTooLong = errors.New("bad PKCS#7 padding - too long")
ErrorPaddingTooShort = errors.New("bad PKCS#7 padding - too short")
ErrorPaddingNotAllTheSame = errors.New("bad PKCS#7 padding - not all the same")
)
// Pad buf using PKCS#7 to a multiple of n.
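The Pad and Unpad helpers this file documents implement standard PKCS#7 padding: 1 to n padding bytes are appended, each holding the padding length, and Unpad validates all of them before stripping. A rough, self-contained sketch of the scheme (not the package's actual implementation), reusing the error values defined above:

// Minimal PKCS#7 sketch, for illustration only - not the pkcs7 package's code.
func pad(n int, buf []byte) []byte {
	c := n - len(buf)%n // always appends 1..n bytes, each equal to the count
	for i := 0; i < c; i++ {
		buf = append(buf, byte(c))
	}
	return buf
}

func unpad(n int, buf []byte) ([]byte, error) {
	if len(buf) == 0 {
		return nil, ErrorPaddingNotFound
	}
	if len(buf)%n != 0 {
		return nil, ErrorPaddingNotAMultiple
	}
	c := int(buf[len(buf)-1])
	if c > n {
		return nil, ErrorPaddingTooLong
	}
	if c == 0 {
		return nil, ErrorPaddingTooShort
	}
	for _, b := range buf[len(buf)-c:] {
		if int(b) != c {
			return nil, ErrorPaddingNotAllTheSame
		}
	}
	return buf[:len(buf)-c], nil
}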


@@ -14,9 +14,9 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http"
"os"
"path"
"sort"
"strconv"
@@ -50,6 +50,7 @@ import (
drive_v2 "google.golang.org/api/drive/v2"
drive "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
)
// Constants
@@ -70,7 +71,7 @@ const (
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
defaultChunkSize = 8 * fs.Mebi
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
listRGrouping = 50 // number of IDs to search at once when using ListR
listRInputBuffer = 1000 // size of input buffer when using ListR
defaultXDGIcon = "text-html"
@@ -276,6 +277,7 @@ Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.
`,
Advanced: true,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
@@ -449,7 +451,11 @@ If downloading a file returns the error "This file has been identified
as malware or spam and cannot be downloaded" with the error code
"cannotDownloadAbusiveFile" then supply this flag to rclone to
indicate you acknowledge the risks of downloading the file and rclone
will download it anyway.`,
will download it anyway.
Note that if you are using a service account it will need Manager
permission (not Content Manager) for this flag to work. If the SA
does not have the right permission, Google will just ignore the flag.`,
Advanced: true,
}, {
Name: "keep_revision_forever",
@@ -564,6 +570,27 @@ If this is set then rclone will not show any dangling shortcuts in listings.
`,
Advanced: true,
Default: false,
}, {
Name: "resource_key",
Help: `Resource key for accessing a link-shared file.
If you need to access files shared with a link like this
https://drive.google.com/drive/folders/XXX?resourcekey=YYY&usp=sharing
Then you will need to use the first part "XXX" as the "root_folder_id"
and the second part "YYY" as the "resource_key" otherwise you will get
404 not found errors when trying to access the directory.
See: https://developers.google.com/drive/api/guides/resource-keys
This resource key requirement only applies to a subset of old files.
Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the
resource key is not needed.
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
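As an illustration of the new resource_key option documented above, a drive remote for a link-shared folder would pair it with root_folder_id, roughly like the snippet below (XXX and YYY are the placeholders from the help text, and the remote would still need the usual drive authentication settings):

[link-shared]
type = drive
root_folder_id = XXX
resource_key = YYY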
@@ -625,6 +652,7 @@ type Options struct {
StopOnDownloadLimit bool `config:"stop_on_download_limit"`
SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -650,6 +678,7 @@ type Fs struct {
grouping int32 // number of IDs to search at once in ListR - read with atomic
listRmu *sync.Mutex // protects listRempties
listRempties map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
dirResourceKeys *sync.Map // map directory ID to resource key
}
type baseObject struct {
@@ -660,6 +689,7 @@ type baseObject struct {
mimeType string // The object MIME type
bytes int64 // size of the object
parents []string // IDs of the parent directories
resourceKey *string // resourceKey is needed for link shared objects
}
type documentObject struct {
baseObject
@@ -731,6 +761,9 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
fs.Errorf(f, "Received download limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
fs.Errorf(f, "Received upload limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
fs.Errorf(f, "Received Shared Drive file limit error: %v", err)
return false, fserrors.FatalError(err)
@@ -800,6 +833,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
// If we need to list file inside those shared folders, we must search it without sharedWithMe
parentsQuery := bytes.NewBufferString("(")
var resourceKeys []string
for _, dirID := range dirIDs {
if dirID == "" {
continue
@@ -820,7 +854,12 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
} else {
_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
}
resourceKey, hasResourceKey := f.dirResourceKeys.Load(dirID)
if hasResourceKey {
resourceKeys = append(resourceKeys, fmt.Sprintf("%s/%s", dirID, resourceKey))
}
}
resourceKeysHeader := strings.Join(resourceKeys, ",")
if parentsQuery.Len() > 1 {
_ = parentsQuery.WriteByte(')')
query = append(query, parentsQuery.String())
@@ -829,8 +868,8 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if title != "" {
searchTitle := f.opt.Enc.FromStandardName(title)
// Escaping the backslash isn't documented but seems to work
searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
searchTitle = strings.ReplaceAll(searchTitle, `\`, `\\`)
searchTitle = strings.ReplaceAll(searchTitle, `'`, `\'`)
var titleQuery bytes.Buffer
_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
@@ -884,7 +923,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
}
list.SupportsAllDrives(true)
list.IncludeItemsFromAllDrives(true)
if f.isTeamDrive {
if f.isTeamDrive && !f.opt.SharedWithMe {
list.DriveId(f.opt.TeamDriveID)
list.Corpora("drive")
}
@@ -892,6 +931,10 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if f.rootFolderID == "appDataFolder" {
list.Spaces("appDataFolder")
}
// Add resource Keys if necessary
if resourceKeysHeader != "" {
list.Header().Add("X-Goog-Drive-Resource-Keys", resourceKeysHeader)
}
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)
@@ -1068,7 +1111,7 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
// try loading service account credentials from env variable, then from a file
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
@@ -1151,15 +1194,16 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
m: m,
grouping: listRGrouping,
listRmu: new(sync.Mutex),
listRempties: make(map[string]struct{}),
name: name,
root: root,
opt: *opt,
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
m: m,
grouping: listRGrouping,
listRmu: new(sync.Mutex),
listRempties: make(map[string]struct{}),
dirResourceKeys: new(sync.Map),
}
f.isTeamDrive = opt.TeamDriveID != ""
f.fileFields = f.getFileFields()
@@ -1169,17 +1213,18 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
WriteMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
FilterAware: true,
}).Fill(ctx, f)
// Create a new authorized Drive client.
f.client = oAuthClient
f.svc, err = drive.New(f.client)
f.svc, err = drive.NewService(context.Background(), option.WithHTTPClient(f.client))
if err != nil {
return nil, fmt.Errorf("couldn't create Drive client: %w", err)
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
f.v2Svc, err = drive_v2.NewService(context.Background(), option.WithHTTPClient(f.client))
if err != nil {
return nil, fmt.Errorf("couldn't create Drive v2 client: %w", err)
}
@@ -1221,6 +1266,11 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
f.dirCache = dircache.New(f.root, f.rootFolderID, f)
// If resource key is set then cache it for the root folder id
if f.opt.ResourceKey != "" {
f.dirResourceKeys.Store(f.rootFolderID, f.opt.ResourceKey)
}
// Parse extensions
if f.opt.Extensions != "" {
if f.opt.ExportExtensions != defaultExportExtensions {
@@ -1319,12 +1369,16 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
}
}
}
return &Object{
o := &Object{
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
md5sum: strings.ToLower(info.Md5Checksum),
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
}
if info.ResourceKey != "" {
o.resourceKey = &info.ResourceKey
}
return o
}
// newDocumentObject creates an fs.Object for a google docs drive.File
@@ -2014,7 +2068,7 @@ func splitID(compositeID string) (actualID, shortcutID string) {
// isShortcutID returns true if compositeID refers to a shortcut
func isShortcutID(compositeID string) bool {
return strings.IndexRune(compositeID, shortcutSeparator) >= 0
return strings.ContainsRune(compositeID, shortcutSeparator)
}
// actualID returns an actual ID from a composite ID
@@ -2085,6 +2139,10 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File
case item.MimeType == driveFolderType:
// cache the directory ID for later lookups
f.dirCache.Put(remote, item.Id)
// cache the resource key for later lookups
if item.ResourceKey != "" {
f.dirResourceKeys.Store(item.Id, item.ResourceKey)
}
when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
d := fs.NewDir(remote, when).SetID(item.Id)
if len(item.Parents) > 0 {
@@ -2126,7 +2184,7 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -2168,10 +2226,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
if exportExt == "" {
return nil, fmt.Errorf("No export format found for %q", importMimeType)
return nil, fmt.Errorf("no export format found for %q", importMimeType)
}
if exportExt != srcExt && !f.opt.AllowImportNameChange {
return nil, fmt.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
return nil, fmt.Errorf("can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
}
}
}
@@ -2360,9 +2418,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -2429,11 +2487,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Copy(id, createInfo).
copy := f.svc.Files.Copy(id, createInfo).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
Context(ctx).Do()
KeepRevisionForever(f.opt.KeepRevisionForever)
srcObj.addResourceKey(copy.Header())
info, err = copy.Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
@@ -2475,7 +2534,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
if f.opt.TrashedOnly {
return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
return errors.New("can't purge with --drive-trashed-only, use delete if you want to selectively delete files")
}
return f.purgeCheck(ctx, dir, false)
}
@@ -2594,9 +2653,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -2941,12 +3000,12 @@ func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err err
return fmt.Errorf("drive: failed when making oauth client: %w", err)
}
f.client = oAuthClient
f.svc, err = drive.New(f.client)
f.svc, err = drive.NewService(context.Background(), option.WithHTTPClient(f.client))
if err != nil {
return fmt.Errorf("couldn't create Drive client: %w", err)
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
f.v2Svc, err = drive_v2.NewService(context.Background(), option.WithHTTPClient(f.client))
if err != nil {
return fmt.Errorf("couldn't create Drive v2 client: %w", err)
}
@@ -3235,7 +3294,7 @@ This will return a JSON list of objects like this
With the -o config parameter it will output the list in a format
suitable for adding to a config file to make aliases for all the
drives found.
drives found and a combined drive.
[My Drive]
type = alias
@@ -3245,10 +3304,15 @@ drives found.
type = alias
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. This may require manual editing
of the names.
[AllDrives]
type = combine
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal characters will be
substituted with "_" and duplicate names will have numbers suffixed.
It will also add a remote called AllDrives which shows all the shared
drives combined into one directory tree.
`,
}, {
Name: "untrash",
@@ -3262,9 +3326,9 @@ This takes an optional directory to trash which make this easier to
use via the API.
rclone backend untrash drive:directory
rclone backend -i untrash drive:directory subdir
rclone backend --interactive untrash drive:directory subdir
Use the -i flag to see what would be restored before restoring it.
Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.
Result:
@@ -3294,8 +3358,14 @@ component will be used as the file name.
If the destination is a drive backend then server-side copying will be
attempted if possible.
Use the -i flag to see what would be copied before copying.
Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
`,
}, {
Name: "exportformats",
Short: "Dump the export formats for debug purposes",
}, {
Name: "importformats",
Short: "Dump the import formats for debug purposes",
}}
// Command the backend to run a named command
@@ -3315,7 +3385,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
out["service_account_file"] = f.opt.ServiceAccountFile
}
if _, ok := opt["chunk_size"]; ok {
out["chunk_size"] = fmt.Sprintf("%s", f.opt.ChunkSize)
out["chunk_size"] = f.opt.ChunkSize.String()
}
return out, nil
case "set":
@@ -3332,11 +3402,11 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
if chunkSize, ok := opt["chunk_size"]; ok {
chunkSizeMap := make(map[string]string)
chunkSizeMap["previous"] = fmt.Sprintf("%s", f.opt.ChunkSize)
chunkSizeMap["previous"] = f.opt.ChunkSize.String()
if err = f.changeChunkSize(chunkSize); err != nil {
return out, err
}
chunkSizeString := fmt.Sprintf("%s", f.opt.ChunkSize)
chunkSizeString := f.opt.ChunkSize.String()
f.m.Set("chunk_size", chunkSizeString)
chunkSizeMap["current"] = chunkSizeString
out["chunk_size"] = chunkSizeMap
@@ -3366,12 +3436,27 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
if _, ok := opt["config"]; ok {
lines := []string{}
for _, drive := range drives {
upstreams := []string{}
names := make(map[string]struct{}, len(drives))
for i, drive := range drives {
name := fspath.MakeConfigName(drive.Name)
for {
if _, found := names[name]; !found {
break
}
name += fmt.Sprintf("-%d", i)
}
names[name] = struct{}{}
lines = append(lines, "")
lines = append(lines, fmt.Sprintf("[%s]", drive.Name))
lines = append(lines, fmt.Sprintf("type = alias"))
lines = append(lines, fmt.Sprintf("[%s]", name))
lines = append(lines, "type = alias")
lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id))
upstreams = append(upstreams, fmt.Sprintf(`"%s=%s:"`, name, name))
}
lines = append(lines, "")
lines = append(lines, "[AllDrives]")
lines = append(lines, "type = combine")
lines = append(lines, fmt.Sprintf("upstreams = %s", strings.Join(upstreams, " ")))
return lines, nil
}
return drives, nil
@@ -3394,6 +3479,10 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
}
return nil, nil
case "exportformats":
return f.exportFormats(ctx), nil
case "importformats":
return f.importFormats(ctx), nil
default:
return nil, fs.ErrorCommandNotFound
}
@@ -3443,12 +3532,6 @@ func (o *baseObject) Size() int64 {
return o.bytes
}
// getRemoteInfo returns a drive.File for the remote
func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) {
info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote)
return
}
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
@@ -3489,7 +3572,6 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *baseObject) ModTime(ctx context.Context) time.Time {
@@ -3530,6 +3612,14 @@ func (o *baseObject) Storable() bool {
return true
}
// addResourceKey adds a X-Goog-Drive-Resource-Keys header for this
// object if required.
func (o *baseObject) addResourceKey(header http.Header) {
if o.resourceKey != nil {
header.Add("X-Goog-Drive-Resource-Keys", fmt.Sprintf("%s/%s", o.id, *o.resourceKey))
}
}
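Both addResourceKey above and the resourceKeysHeader built in list() send resource keys in the format Google documents: one id/resourceKey pair per entry, comma-separated, in a single X-Goog-Drive-Resource-Keys header. A small sketch with made-up IDs:

// Illustration only - the IDs and resource keys below are invented.
pairs := []string{
	"1AbCdEfGhIjKl/0-abc123", // fileOrDirID/resourceKey
	"0ZyXwVuTsRqPo/0-def456",
}
header := make(http.Header)
header.Add("X-Goog-Drive-Resource-Keys", strings.Join(pairs, ","))
// Sends: X-Goog-Drive-Resource-Keys: 1AbCdEfGhIjKl/0-abc123,0ZyXwVuTsRqPo/0-def456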
// httpResponse gets an http.Response object for the object
// using the url and method passed in
func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
@@ -3545,6 +3635,7 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
// Don't supply range requests for 0 length objects as they always fail
delete(req.Header, "Range")
}
o.addResourceKey(req.Header)
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.client.Do(req)
if err == nil {
@@ -3624,7 +3715,7 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
url += "acknowledgeAbuse=true"
_, res, err = o.httpResponse(ctx, url, "GET", options)
} else {
err = fmt.Errorf("Use the --drive-acknowledge-abuse flag to download this file: %w", err)
err = fmt.Errorf("use the --drive-acknowledge-abuse flag to download this file: %w", err)
}
}
if err != nil {
@@ -3711,7 +3802,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
data = data[:limit]
}
return ioutil.NopCloser(bytes.NewReader(data)), nil
return io.NopCloser(bytes.NewReader(data)), nil
}
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
@@ -3737,7 +3828,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {


@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"os"
"path"
@@ -19,6 +18,7 @@ import (
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
@@ -28,6 +28,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
func TestDriveScopes(t *testing.T) {
@@ -76,7 +77,7 @@ var additionalMimeTypes = map[string]string{
// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {})
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
buf, err := os.ReadFile(filepath.FromSlash("test/about.json"))
var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"`
@@ -190,6 +191,69 @@ func TestExtensionsForImportFormats(t *testing.T) {
}
}
func (f *Fs) InternalTestShouldRetry(t *testing.T) {
ctx := context.Background()
gatewayTimeout := googleapi.Error{
Code: 503,
}
timeoutRetry, timeoutError := f.shouldRetry(ctx, &gatewayTimeout)
assert.True(t, timeoutRetry)
assert.Equal(t, &gatewayTimeout, timeoutError)
generic403 := googleapi.Error{
Code: 403,
}
rLEItem := googleapi.ErrorItem{
Reason: "rateLimitExceeded",
Message: "User rate limit exceeded.",
}
generic403.Errors = append(generic403.Errors, rLEItem)
oldStopUpload := f.opt.StopOnUploadLimit
oldStopDownload := f.opt.StopOnDownloadLimit
f.opt.StopOnUploadLimit = true
f.opt.StopOnDownloadLimit = true
defer func() {
f.opt.StopOnUploadLimit = oldStopUpload
f.opt.StopOnDownloadLimit = oldStopDownload
}()
expectedRLError := fserrors.FatalError(&generic403)
rateLimitRetry, rateLimitErr := f.shouldRetry(ctx, &generic403)
assert.False(t, rateLimitRetry)
assert.Equal(t, rateLimitErr, expectedRLError)
dQEItem := googleapi.ErrorItem{
Reason: "downloadQuotaExceeded",
}
generic403.Errors[0] = dQEItem
expectedDQError := fserrors.FatalError(&generic403)
downloadQuotaRetry, downloadQuotaError := f.shouldRetry(ctx, &generic403)
assert.False(t, downloadQuotaRetry)
assert.Equal(t, downloadQuotaError, expectedDQError)
tDFLEItem := googleapi.ErrorItem{
Reason: "teamDriveFileLimitExceeded",
}
generic403.Errors[0] = tDFLEItem
expectedTDFLError := fserrors.FatalError(&generic403)
teamDriveFileLimitRetry, teamDriveFileLimitError := f.shouldRetry(ctx, &generic403)
assert.False(t, teamDriveFileLimitRetry)
assert.Equal(t, teamDriveFileLimitError, expectedTDFLError)
qEItem := googleapi.ErrorItem{
Reason: "quotaExceeded",
}
generic403.Errors[0] = qEItem
expectedQuotaError := fserrors.FatalError(&generic403)
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, quotaExceededRetry)
assert.Equal(t, quotaExceededError, expectedQuotaError)
sqEItem := googleapi.ErrorItem{
Reason: "storageQuotaExceeded",
}
generic403.Errors[0] = sqEItem
expectedStorageQuotaError := fserrors.FatalError(&generic403)
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, storageQuotaExceededRetry)
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
}
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
oldAllow := f.opt.AllowImportNameChange
f.opt.AllowImportNameChange = true
@@ -378,9 +442,9 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
// Make some objects, one in a subdir
contents := random.String(100)
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
_, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
_, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
_ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
// Check objects
checkObjects := func() {
@@ -462,6 +526,9 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
opt := &filter.Opt{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)
@@ -496,7 +563,7 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
require.NoError(t, err)
file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
_, _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
_ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
// validate sync/copy
const timeQuery = "(modifiedTime >= '"
@@ -545,6 +612,7 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
t.Run("AgeQuery", f.InternalTestAgeQuery)
t.Run("ShouldRetry", f.InternalTestShouldRetry)
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -118,12 +118,12 @@ func (b *batcher) Batching() bool {
}
// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -137,7 +137,7 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
if err != nil {
return nil, fmt.Errorf("batch commit failed: %w", err)
}
return batchStatus, nil
return complete, nil
}
// finishBatchJobStatus waits for the batch to complete returning completed entries
@@ -199,26 +199,11 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
fs.Debugf(b.f, "Committing %s", desc)
// finalise the batch getting either a result or a job id to poll
batchStatus, err := b.finishBatch(ctx, items)
complete, err := b.finishBatch(ctx, items)
if err != nil {
return err
}
// check whether batch is complete
var complete *files.UploadSessionFinishBatchResult
switch batchStatus.Tag {
case "async_job_id":
// wait for batch to complete
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
if err != nil {
return err
}
case "complete":
complete = batchStatus.Complete
default:
return fmt.Errorf("batch returned unknown status %q", batchStatus.Tag)
}
// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {
@@ -319,9 +304,12 @@ outer:
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
if !b.Batching() {
return
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Commiting uploads - please wait...")
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message


@@ -268,7 +268,7 @@ default based on the batch_mode in use.
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish comitting`,
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
@@ -472,10 +472,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
args := team.NewMembersGetInfoArgs(members)
memberIds, err := f.team.MembersGetInfo(args)
if err != nil {
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
}
if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
}
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
}
@@ -923,7 +925,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1042,9 +1044,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1103,9 +1105,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1197,7 +1199,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return
}
if len(listRes.Links) == 0 {
err = errors.New("Dropbox says the sharing link already exists, but list came back empty")
err = errors.New("sharing link already exists, but list came back empty")
return
}
linkRes = listRes.Links[0]
@@ -1209,7 +1211,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
case *sharing.FolderLinkMetadata:
link = res.Url
default:
err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res)
err = fmt.Errorf("don't know how to extract link, response has unknown format: %T", res)
}
}
return
@@ -1269,7 +1271,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("about failed: %w", err)
return nil, err
}
var total uint64
if q.Allocation != nil {
@@ -1370,10 +1372,12 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
if timeout < 30 {
timeout = 30
fs.Debugf(f, "Increasing poll interval to minimum 30s")
}
if timeout > 480 {
timeout = 480
fs.Debugf(f, "Decreasing poll interval to maximum 480s")
}
err = f.pacer.Call(func() (bool, error) {
@@ -1431,7 +1435,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
}
if entryPath != "" {
notifyFunc(entryPath, entryType)
notifyFunc(f.opt.Enc.ToStandardPath(entryPath), entryType)
}
}
if !changeList.HasMore {
@@ -1665,7 +1669,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
delta := int64(correctOffset) - int64(cursor.Offset)
skip += delta
what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
if skip < 0 {
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
} else if skip == chunkSize {
@@ -1693,6 +1697,9 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
if size > 0 {
// if size is known, check if next chunk is final
appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
if in.BytesRead() > uint64(size) {
return nil, fmt.Errorf("expected %d bytes in input, but have read %d so far", size, in.BytesRead())
}
} else {
// if size is unknown, upload as long as we can read full chunks from the reader
appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
@@ -1756,7 +1763,7 @@ func checkPathLength(name string) (err error) {
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {


@@ -28,25 +28,44 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded
}
var errorRegex = regexp.MustCompile(`#\d{1,3}`)
func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 {
return 0
}
code, err := strconv.Atoi(matches[0])
if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0
}
return code
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// Detect this error which the integration tests provoke
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
// 1Fichier uses HTTP error code 403 (Forbidden) for all kinds of errors with
// responses looking like this: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
// https://1fichier.com/api.html
//
// file/ls.cgi is limited :
//
// Warning (can be changed in case of abuses) :
// List all files of the account is limited to 1 request per hour.
// List folders is limited to 5 000 results and 1 request per folder per 30s.
if err != nil && strings.Contains(err.Error(), "Flood detected") {
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
// We attempt to parse the actual 1Fichier error code from this body and handle it accordingly
// Most importantly #374 (Flood detected: IP locked) which the integration tests provoke
// The list below is far from complete and should be expanded if we see any more error codes.
if err != nil {
switch parseFichierError(err) {
case 93:
return false, err // No such user
case 186:
return false, err // IP blocked?
case 374:
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
default:
}
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
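To make the error handling above concrete, here is a standalone sketch (not the backend's exact code) of extracting a numeric 1Fichier error code such as the flood-detection #374 quoted in the comment; the error text in the final comment is illustrative only:

// Illustration only: pull a "#NNN" style code out of an error message.
var codeRe = regexp.MustCompile(`#(\d{1,3})`)

func errorCode(err error) int {
	m := codeRe.FindStringSubmatch(err.Error())
	if len(m) < 2 {
		return 0
	}
	code, convErr := strconv.Atoi(m[1]) // m[1] holds the digits without the '#'
	if convErr != nil {
		return 0
	}
	return code
}

// errorCode(errors.New(`{"message":"Flood detected: IP Locked #374","status":"KO"}`)) == 374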
@@ -468,7 +487,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
fileName = f.opt.Enc.FromStandardName(fileName)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
return nil, errors.New("invalid UploadID")
}
opts := rest.Opts{
@@ -510,7 +529,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
return nil, errors.New("invalid UploadID")
}
opts := rest.Opts{


@@ -1,3 +1,4 @@
// Package fichier provides an interface to the 1Fichier storage system.
package fichier
import (
@@ -294,7 +295,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
path, ok := f.dirCache.GetInv(directoryID)
if !ok {
return nil, errors.New("Cannot find dir in dircache")
return nil, errors.New("cannot find dir in dircache")
}
return f.newObjectFromFile(ctx, path, file), nil


@@ -84,7 +84,7 @@ type CopyFileResponse struct {
URLs []FileCopy `json:"urls"`
}
// FileCopy is used in the the CopyFileResponse
// FileCopy is used in the CopyFileResponse
type FileCopy struct {
FromURL string `json:"from_url"`
ToURL string `json:"to_url"`


@@ -19,7 +19,7 @@ const (
timeFormatJSON = `"` + timeFormatParameters + `"`
)
// Time represents represents date and time information for the
// Time represents date and time information for the
// filefabric API
type Time time.Time
@@ -95,7 +95,7 @@ type Status struct {
// Warning string `json:"warning"` // obsolete
}
// Status statisfies the error interface
// Status satisfies the error interface
func (e *Status) Error() string {
return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}


@@ -20,7 +20,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -150,7 +149,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
m configmap.Mapper // to save config
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenMu sync.Mutex // hold when reading the token
@@ -373,7 +372,7 @@ type params map[string]interface{}
// rpc calls the rpc.php method of the SME file fabric
//
// This is an entry point to all the method calls
// This is an entry point to all the method calls.
//
// If result is nil then resp.Body will need closing
func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKError, options []fs.OpenOption) (resp *http.Response, err error) {
@@ -490,7 +489,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Root is a dir - cache its ID
f.dirCache.Put(f.root, info.ID)
}
} else {
//} else {
// Root is not found so a directory
}
}
@@ -678,7 +677,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -697,7 +696,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -783,9 +782,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -843,7 +842,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// Wait for the the background task to complete if necessary
// Wait for the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
if taskID == "" || taskID == "0" {
// No task to wait for
@@ -956,9 +955,9 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1135,7 +1134,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1187,7 +1185,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, errors.New("can't download - no id")
}
if o.contentType == emptyMimeType {
return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
return io.NopCloser(bytes.NewReader([]byte{})), nil
}
fs.FixRangeOption(options, o.size)
resp, err := o.fs.rpc(ctx, "getFile", params{
@@ -1201,7 +1199,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {


@@ -15,7 +15,7 @@ import (
"sync"
"time"
"github.com/rclone/ftp"
"github.com/jlaffaye/ftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -45,7 +45,7 @@ const (
func init() {
fs.Register(&fs.RegInfo{
Name: "ftp",
Description: "FTP Connection",
Description: "FTP",
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
@@ -70,7 +70,7 @@ func init() {
When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTP.`,
than port 21. Cannot be used in combination with explicit FTPS.`,
Default: false,
}, {
Name: "explicit_tls",
@@ -78,11 +78,25 @@ than port 21. Cannot be used in combination with explicit FTP.`,
When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
to an encrypted one. Cannot be used in combination with implicit FTPS.`,
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
Name: "concurrency",
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
Note that setting this is very likely to cause deadlocks so it should
be used with care.
If you are doing a sync or copy then make sure concurrency is one more
than the sum of |--transfers| and |--checkers|.
If you use |--check-first| then it just needs to be one more than the
maximum of |--checkers| and |--transfers|.
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.
`, "|", "`", -1),
Default: 0,
Advanced: true,
}, {
@@ -100,11 +114,21 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
Help: "Disable using MLSD even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "disable_utf8",
Help: "Disable using UTF-8 even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "writing_mdtm",
Help: "Use MDTM to set modification time (VsFtpd quirk)",
Default: false,
Advanced: true,
}, {
Name: "force_list_hidden",
Help: "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
@@ -184,7 +208,9 @@ type Options struct {
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
@@ -289,18 +315,26 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}
// returns true if this FTP error should be retried
func isRetriableFtpError(err error) bool {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
return true
}
}
return false
}
// shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
if isRetriableFtpError(err) {
return true, err
}
return fserrors.ShouldRetry(err), err
}
@@ -310,14 +344,44 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")
// Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) {
fs.Debugf(f, "dial(%q,%q)", network, address)
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if f.tlsConf != nil && err == nil {
conn = tls.Client(conn, f.tlsConf)
if err != nil {
return nil, err
}
return
// Connect using cleartext only for non TLS
if f.tlsConf == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
if f.opt.ExplicitTLS && initialConnection {
initialConnection = false
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, f.tlsConf)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
// See: https://github.com/jlaffaye/ftp/issues/282
if false {
err = tlsConn.HandshakeContext(ctx)
if err != nil {
_ = conn.Close()
return nil, err
}
}
return tlsConn, nil
}
ftpConfig := []ftp.DialOption{
ftp.DialWithContext(ctx),
ftp.DialWithDialFunc(dial),
}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
@@ -325,12 +389,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -338,12 +396,18 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.DisableMLSD {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
}
if f.opt.DisableUTF8 {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledUTF8(true))
}
if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
}
if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
}
if f.opt.ForceListHidden {
ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
@@ -478,7 +542,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
protocol = "ftps://"
}
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
@@ -601,8 +665,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
if remote == "" || remote == "." || remote == "/" {
// if root, assume exists and synthesize an entry
return &ftp.Entry{
Name: "",
@@ -610,13 +673,32 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
Time: time.Now(),
}, nil
}
dir := path.Dir(fullPath)
base := path.Base(fullPath)
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("findItem: %w", err)
}
// returns TRUE if MLST is supported which is required to call GetEntry
if c.IsTimePreciseInList() {
entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
f.putFtpConnection(&c, err)
if err != nil {
err = translateErrorFile(err)
if err == fs.ErrorObjectNotFound {
return nil, nil
}
return nil, err
}
if entry != nil {
f.entryToStandard(entry)
}
return entry, nil
}
dir := path.Dir(remote)
base := path.Base(remote)
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
@@ -635,7 +717,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(ctx, remote)
entry, err := f.findItem(ctx, path.Join(f.root, remote))
if err != nil {
return nil, err
}
@@ -657,7 +739,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
// dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, remote)
entry, err := f.findItem(ctx, path.Join(f.root, remote))
if err != nil {
return false, fmt.Errorf("dirExists: %w", err)
}
@@ -709,7 +791,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f, "Timeout when waiting for List")
return nil, errors.New("Timeout when waiting for List")
return nil, errors.New("timeout when waiting for List")
}
// Annoyingly FTP returns success for a directory which
@@ -760,11 +842,12 @@ func (f *Fs) Hashes() hash.Set {
// Precision shows whether modified time is supported or not depending on the
// FTP server capabilities, namely whether FTP server:
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
//
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) Precision() time.Duration {
if (f.fGetTime || f.fLstTime) && f.fSetTime {
@@ -800,32 +883,18 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// getInfo reads the FileInfo for a path
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
dir := path.Dir(remote)
base := path.Base(remote)
c, err := f.getFtpConnection(ctx)
file, err := f.findItem(ctx, remote)
if err != nil {
return nil, fmt.Errorf("getInfo: %w", err)
}
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for i := range files {
file := files[i]
f.entryToStandard(file)
if file.Name == base {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
return nil, err
} else if file != nil {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
}
return nil, fs.ErrorObjectNotFound
}
@@ -1125,22 +1194,33 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
}
c, err := o.fs.getFtpConnection(ctx)
var (
fd *ftp.Response
c *ftp.ServerConn
)
err = o.fs.pacer.Call(func() (bool, error) {
c, err = o.fs.getFtpConnection(ctx)
if err != nil {
return false, err // getFtpConnection has retries already
}
fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
}
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("open: %w", err)
}
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, fmt.Errorf("open: %w", err)
}
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil
}
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {


@@ -34,9 +34,9 @@ func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
// test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) {
const (
fileSize = 100000000 // 100 MiB
idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
fileSize = 100000000 // 100 MiB
idleTimeout = 1 * time.Second // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
)
if testing.Short() {


@@ -19,11 +19,12 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
@@ -43,6 +44,7 @@ import (
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
option "google.golang.org/api/option"
// NOTE: This API is deprecated
storage "google.golang.org/api/storage/v1"
@@ -80,7 +82,8 @@ func init() {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
envAuth, _ := m.Get("env_auth")
if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
return nil, nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -304,6 +307,23 @@ rclone does if you know the bucket exists already.
`,
Default: false,
Advanced: true,
}, {
Name: "decompress",
Help: `If set this will decompress gzip encoded objects.
It is possible to upload objects to GCS with "Content-Encoding: gzip"
set. Normally rclone will download these files as compressed objects.
If this flag is set then rclone will decompress these files with
"Content-Encoding: gzip" as they are received. This means that rclone
can't check the size and hash but the file contents will be decompressed.
`,
Advanced: true,
Default: false,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -311,6 +331,17 @@ rclone does if you know the bucket exists already.
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}, {
Name: "env_auth",
Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...),
})
}
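The new `env_auth` option defers to Google Application Default Credentials instead of the interactive OAuth flow (the backend calls `google.DefaultClient` when it is set, as shown further down in this diff). A minimal sketch of what that resolution looks like on its own; the project name in the request URL is a placeholder, not part of this change:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// Application Default Credentials are resolved in order from
	// GOOGLE_APPLICATION_CREDENTIALS, gcloud's well-known credentials
	// file, and finally the GCE/GKE metadata server.
	client, err := google.DefaultClient(ctx,
		"https://www.googleapis.com/auth/devstorage.full_control")
	if err != nil {
		fmt.Println("no default credentials found:", err)
		return
	}
	// Placeholder request; "my-project" is not a real project.
	resp, err := client.Get("https://storage.googleapis.com/storage/v1/b?project=my-project")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```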
@@ -327,21 +358,25 @@ type Options struct {
Location string `config:"location"`
StorageClass string `config:"storage_class"`
NoCheckBucket bool `config:"no_check_bucket"`
Decompress bool `config:"decompress"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
}
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
warnCompressed sync.Once // warn once about compressed files
}
// Object describes a storage object
@@ -355,6 +390,7 @@ type Object struct {
bytes int64 // Bytes in the object
modTime time.Time // Modified time of the object
mimeType string
gzipped bool // set if object has Content-Encoding: gzip
}
// ------------------------------------------------------------
@@ -372,7 +408,7 @@ func (f *Fs) Root() string {
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return fmt.Sprintf("GCS root")
return "GCS root"
}
if f.rootDirectory == "" {
return fmt.Sprintf("GCS bucket %s", f.rootBucket)
@@ -464,7 +500,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
@@ -477,6 +513,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
}
} else if opt.EnvAuth {
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
if err != nil {
@@ -505,7 +546,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Create a new authorized Drive client.
f.client = oAuthClient
f.svc, err = storage.New(f.client)
gcsOpts := []option.ClientOption{option.WithHTTPClient(f.client)}
if opt.Endpoint != "" {
gcsOpts = append(gcsOpts, option.WithEndpoint(opt.Endpoint))
}
f.svc, err = storage.NewService(context.Background(), gcsOpts...)
if err != nil {
return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
}
@@ -562,7 +607,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
// Set recurse to read sub directories.
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
@@ -780,7 +825,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -882,9 +927,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -975,6 +1020,7 @@ func (o *Object) setMetaData(info *storage.Object) {
o.url = info.MediaLink
o.bytes = int64(info.Size)
o.mimeType = info.ContentType
o.gzipped = info.ContentEncoding == "gzip"
// Read md5sum
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
@@ -1013,6 +1059,12 @@ func (o *Object) setMetaData(info *storage.Object) {
} else {
o.modTime = modTime
}
// If gunzipping then size and md5sum are unknown
if o.gzipped && o.fs.opt.Decompress {
o.bytes = -1
o.md5sum = ""
}
}
// readObjectInfo reads the definition for an object
@@ -1113,6 +1165,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}
fs.FixRangeOption(options, o.bytes)
if o.gzipped && !o.fs.opt.Decompress {
// Allow files which are stored on the cloud storage system
// compressed to be downloaded without being decompressed. Note
// that setting this here overrides the automatic decompression
// in the Transport.
//
// See: https://cloud.google.com/storage/docs/transcoding
req.Header.Set("Accept-Encoding", "gzip")
o.fs.warnCompressed.Do(func() {
fs.Logf(o, "Not decompressing 'Content-Encoding: gzip' compressed file. Use --gcs-decompress to override")
})
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {


@@ -1,3 +1,4 @@
// Package api provides types used by the Google Photos API.
package api
import (


@@ -178,7 +178,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
unAuth *rest.Client // unauthenticated http client
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
ts *oauthutil.TokenSource // token source for oauth2
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps
@@ -562,7 +562,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
for i := range items {
item := &result.MediaItems[i]
remote := item.Filename
remote = strings.Replace(remote, "/", "", -1)
remote = strings.ReplaceAll(remote, "/", "")
err = fn(remote, item, false)
if err != nil {
return err
@@ -661,7 +661,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {


@@ -3,7 +3,7 @@ package googlephotos
import (
"context"
"fmt"
"io/ioutil"
"io"
"net/http"
"path"
"testing"
@@ -12,7 +12,6 @@ import (
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
@@ -37,7 +36,7 @@ func TestIntegration(t *testing.T) {
}
f, err := fs.NewFs(ctx, *fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
}
require.NoError(t, err)
@@ -56,7 +55,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
@@ -99,7 +98,7 @@ func TestIntegration(t *testing.T) {
t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx)
require.NoError(t, err)
buf, err := ioutil.ReadAll(in)
buf, err := io.ReadAll(in)
require.NoError(t, err)
require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000)
@@ -221,7 +220,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()


@@ -315,7 +315,7 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
// featureFilter creates a filter for the Feature enum
//
// The API only supports one feature, FAVORITES, so hardcode that feature
// The API only supports one feature, FAVORITES, so hardcode that feature.
//
// https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter
func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) {


@@ -50,7 +50,7 @@ func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums,
// mock listUploads for testing
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, _ = f.uploaded[dir]
entries = f.uploaded[dir]
return entries, nil
}


@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
}
accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
doneCount++
}
})


@@ -27,6 +27,9 @@ func init() {
Name: "hasher",
Description: "Better checksums for other remotes",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
@@ -158,6 +161,11 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
IsLocal: true,
ReadMimeType: true,
WriteMimeType: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
@@ -282,7 +290,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
if do := f.Fs.Features().CleanUp; do != nil {
return do(ctx)
}
return errors.New("CleanUp not supported")
return errors.New("not supported by underlying remote")
}
// About gets quota information from the Fs
@@ -290,7 +298,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if do := f.Fs.Features().About; do != nil {
return do(ctx)
}
return nil, errors.New("About not supported")
return nil, errors.New("not supported by underlying remote")
}
// ChangeNotify calls the passed function with a path that has had changes.
@@ -485,6 +493,17 @@ func (o *Object) MimeType(ctx context.Context) string {
return ""
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.Object.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -507,10 +526,5 @@ var (
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.FullObject = (*Object)(nil)
)


@@ -19,7 +19,7 @@ import (
func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: name, ModTime: mtime1}
_, o := fstests.PutTestContents(ctx, t, f, &item, data, true)
o := fstests.PutTestContents(ctx, t, f, &item, data, true)
require.NotNil(t, o)
return o
}
@@ -35,7 +35,7 @@ func (f *Fs) testUploadFromCrypt(t *testing.T) {
// make a temporary crypt remote
ctx := context.Background()
pass := obscure.MustObscure("crypt")
remote := fmt.Sprintf(":crypt,remote=%s,password=%s:", tempRoot, pass)
remote := fmt.Sprintf(`:crypt,remote="%s",password="%s":`, tempRoot, pass)
cryptFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)


@@ -33,6 +33,7 @@ func TestIntegration(t *testing.T) {
{Name: "TestHasher", Key: "remote", Value: tempDir},
}
opt.RemoteName = "TestHasher:"
opt.QuickTestOK = true
}
fstests.Run(t, &opt)
}


@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"path"
"time"
@@ -118,7 +117,7 @@ func (o *Object) updateHashes(ctx context.Context) error {
defer func() {
_ = r.Close()
}()
if _, err = io.Copy(ioutil.Discard, r); err != nil {
if _, err = io.Copy(io.Discard, r); err != nil {
fs.Infof(o, "update failed (copy): %v", err)
return err
}


@@ -92,7 +92,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.ServicePrincipalName != "" {
options.KerberosClient, err = getKerberosClient()
if err != nil {
return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
return nil, fmt.Errorf("problem with kerberos authentication: %w", err)
}
options.KerberosServicePrincipleName = opt.ServicePrincipalName
@@ -265,9 +265,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//


@@ -1,6 +1,7 @@
//go:build !plan9
// +build !plan9
// Package hdfs provides an interface to the HDFS storage system.
package hdfs
import (


@@ -115,7 +115,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
info, err := o.fs.client.Stat(realpath)
_, err = o.fs.client.Stat(realpath)
if err == nil {
err = o.fs.client.Remove(realpath)
if err != nil {
@@ -147,7 +147,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
info, err = o.fs.client.Stat(realpath)
info, err := o.fs.client.Stat(realpath)
if err != nil {
return err
}


@@ -0,0 +1,81 @@
package api
import (
"encoding/json"
"net/url"
"path"
"strings"
"time"
)
// Some presets for different amounts of information that can be requested for fields;
// it is recommended to only request the information that is actually needed.
var (
HiDriveObjectNoMetadataFields = []string{"name", "type"}
HiDriveObjectWithMetadataFields = append(HiDriveObjectNoMetadataFields, "id", "size", "mtime", "chash")
HiDriveObjectWithDirectoryMetadataFields = append(HiDriveObjectWithMetadataFields, "nmembers")
DirectoryContentFields = []string{"nmembers"}
)
// QueryParameters represents the parameters passed to an API-call.
type QueryParameters struct {
url.Values
}
// NewQueryParameters initializes an instance of QueryParameters and
// returns a pointer to it.
func NewQueryParameters() *QueryParameters {
return &QueryParameters{url.Values{}}
}
// SetFileInDirectory sets the appropriate parameters
// to specify a path to a file in a directory.
// This is used by requests that work with paths for files that do not exist yet.
// (For example when creating a file).
// Most requests use the format produced by SetPath(...).
func (p *QueryParameters) SetFileInDirectory(filePath string) {
directory, file := path.Split(path.Clean(filePath))
p.Set("dir", path.Clean(directory))
p.Set("name", file)
// NOTE: It would be possible to switch to pid-based requests
// by modifying this function.
}
// SetPath sets the appropriate parameters to access the given path.
func (p *QueryParameters) SetPath(objectPath string) {
p.Set("path", path.Clean(objectPath))
// NOTE: It would be possible to switch to pid-based requests
// by modifying this function.
}
// SetTime sets the key to the time-value. It replaces any existing values.
func (p *QueryParameters) SetTime(key string, value time.Time) error {
valueAPI := Time(value)
valueBytes, err := json.Marshal(&valueAPI)
if err != nil {
return err
}
p.Set(key, string(valueBytes))
return nil
}
// AddList adds the given values as a list
// with each value separated by the separator.
// It appends to any existing values associated with key.
func (p *QueryParameters) AddList(key string, separator string, values ...string) {
original := p.Get(key)
p.Set(key, strings.Join(values, separator))
if original != "" {
p.Set(key, original+separator+p.Get(key))
}
}
// AddFields sets the appropriate parameter to access the given fields.
// The given fields will be appended to any other existing fields.
func (p *QueryParameters) AddFields(prefix string, fields ...string) {
modifiedFields := make([]string, len(fields))
for i, field := range fields {
modifiedFields[i] = prefix + field
}
p.AddList("fields", ",", modifiedFields...)
}
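As a rough illustration of how these query helpers compose (assuming the import path `github.com/rclone/rclone/backend/hidrive/api` used elsewhere in this change, and a made-up HiDrive path), the directory-listing code in the backend builds its request parameters roughly like this:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/backend/hidrive/api"
)

func main() {
	// Build the query for a directory listing, similar to what the
	// backend's iterateOverDirectory helper does.
	p := api.NewQueryParameters()
	p.SetPath("/users/example/photos") // placeholder path
	p.AddFields("members.", api.HiDriveObjectWithMetadataFields...)
	p.AddFields("", api.DirectoryContentFields...)
	p.AddList("sort", ",", "name")

	// QueryParameters embeds url.Values, so Encode is available directly.
	fmt.Println(p.Encode())
}
```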


@@ -0,0 +1,135 @@
// Package api has type definitions and code related to API-calls for the HiDrive-API.
package api
import (
"encoding/json"
"fmt"
"net/url"
"strconv"
"time"
)
// Time represents date and time information for the API.
type Time time.Time
// MarshalJSON turns Time into JSON (in Unix-time/UTC).
func (t *Time) MarshalJSON() ([]byte, error) {
secs := time.Time(*t).Unix()
return []byte(strconv.FormatInt(secs, 10)), nil
}
// UnmarshalJSON turns JSON into Time.
func (t *Time) UnmarshalJSON(data []byte) error {
secs, err := strconv.ParseInt(string(data), 10, 64)
if err != nil {
return err
}
*t = Time(time.Unix(secs, 0))
return nil
}
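A small sketch of the wire format this produces (plain Unix seconds, unquoted), assuming the same import path used by the backend in this change:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/rclone/rclone/backend/hidrive/api"
)

func main() {
	t := api.Time(time.Date(2023, 3, 14, 12, 0, 0, 0, time.UTC))
	b, _ := json.Marshal(&t)
	fmt.Println(string(b)) // 1678795200

	var decoded api.Time
	_ = json.Unmarshal(b, &decoded)
	fmt.Println(time.Time(decoded).UTC()) // 2023-03-14 12:00:00 +0000 UTC
}
```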
// Error is returned from the API when things go wrong.
type Error struct {
Code json.Number `json:"code"`
ContextInfo json.RawMessage
Message string `json:"msg"`
}
// Error returns a string for the error and satisfies the error interface.
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q", e.Code.String())
if e.Message != "" {
out += ": " + e.Message
}
if e.ContextInfo != nil {
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
}
return out
}
// Check Error satisfies the error interface.
var _ error = (*Error)(nil)
// possible types for HiDriveObject
const (
HiDriveObjectTypeDirectory = "dir"
HiDriveObjectTypeFile = "file"
HiDriveObjectTypeSymlink = "symlink"
)
// HiDriveObject describes a folder, a symlink or a file.
// Depending on the type and content, not all fields are present.
type HiDriveObject struct {
Type string `json:"type"`
ID string `json:"id"`
ParentID string `json:"parent_id"`
Name string `json:"name"`
Path string `json:"path"`
Size int64 `json:"size"`
MemberCount int64 `json:"nmembers"`
ModifiedAt Time `json:"mtime"`
ChangedAt Time `json:"ctime"`
MetaHash string `json:"mhash"`
MetaOnlyHash string `json:"mohash"`
NameHash string `json:"nhash"`
ContentHash string `json:"chash"`
IsTeamfolder bool `json:"teamfolder"`
Readable bool `json:"readable"`
Writable bool `json:"writable"`
Shareable bool `json:"shareable"`
MIMEType string `json:"mime_type"`
}
// ModTime returns the modification time of the HiDriveObject.
func (i *HiDriveObject) ModTime() time.Time {
t := time.Time(i.ModifiedAt)
if t.IsZero() {
t = time.Time(i.ChangedAt)
}
return t
}
// UnmarshalJSON turns JSON into HiDriveObject and
// introduces specific default-values where necessary.
func (i *HiDriveObject) UnmarshalJSON(data []byte) error {
type objectAlias HiDriveObject
defaultObject := objectAlias{
Size: -1,
MemberCount: -1,
}
err := json.Unmarshal(data, &defaultObject)
if err != nil {
return err
}
name, err := url.PathUnescape(defaultObject.Name)
if err == nil {
defaultObject.Name = name
}
*i = HiDriveObject(defaultObject)
return nil
}
// DirectoryContent describes the content of a directory.
type DirectoryContent struct {
TotalCount int64 `json:"nmembers"`
Entries []HiDriveObject `json:"members"`
}
// UnmarshalJSON turns JSON into DirectoryContent and
// introduces specific default-values where necessary.
func (d *DirectoryContent) UnmarshalJSON(data []byte) error {
type directoryContentAlias DirectoryContent
defaultDirectoryContent := directoryContentAlias{
TotalCount: -1,
}
err := json.Unmarshal(data, &defaultDirectoryContent)
if err != nil {
return err
}
*d = DirectoryContent(defaultDirectoryContent)
return nil
}
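The alias-type trick in these UnmarshalJSON methods exists so that fields missing from a response come back as sentinel defaults rather than Go zero values. A quick sketch of the effect (import path as above):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/hidrive/api"
)

func main() {
	var d api.DirectoryContent
	// "nmembers" is absent, so the custom UnmarshalJSON keeps the
	// pre-filled default of -1 instead of Go's zero value 0.
	_ = json.Unmarshal([]byte(`{"members": []}`), &d)
	fmt.Println(d.TotalCount, len(d.Entries)) // -1 0
}
```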

backend/hidrive/helpers.go Normal file

@@ -0,0 +1,888 @@
package hidrive
// This file is for helper-functions which may provide more general and
// specialized functionality than the generic interfaces.
// There are two sections:
// 1. methods bound to Fs
// 2. other functions independent from Fs used throughout the package
// NOTE: Functions accessing paths expect any relative paths
// to be resolved prior to execution with resolvePath(...).
import (
"bytes"
"context"
"errors"
"io"
"net/http"
"path"
"strconv"
"sync"
"time"
"github.com/rclone/rclone/backend/hidrive/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/ranges"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
const (
// MaximumUploadBytes represents the maximum amount of bytes
// a single upload-operation will support.
MaximumUploadBytes = 2147483647 // = 2GiB - 1
// iterationChunkSize represents the chunk size used to iterate directory contents.
iterationChunkSize = 5000
)
var (
// retryErrorCodes is a slice of error codes that we will always retry.
retryErrorCodes = []int{
429, // Too Many Requests
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// ErrorFileExists is returned when a query tries to create a file
// that already exists.
ErrorFileExists = errors.New("destination file already exists")
)
// MemberType represents the possible types of entries a directory can contain.
type MemberType string
// possible values for MemberType
const (
AllMembers MemberType = "all"
NoMembers MemberType = "none"
DirectoryMembers MemberType = api.HiDriveObjectTypeDirectory
FileMembers MemberType = api.HiDriveObjectTypeFile
SymlinkMembers MemberType = api.HiDriveObjectTypeSymlink
)
// SortByField represents possible fields to sort entries of a directory by.
type SortByField string
// possible values for SortByField
const (
descendingSort string = "-"
SortByName SortByField = "name"
SortByModTime SortByField = "mtime"
SortByObjectType SortByField = "type"
SortBySize SortByField = "size"
SortByNameDescending SortByField = SortByField(descendingSort) + SortByName
SortByModTimeDescending SortByField = SortByField(descendingSort) + SortByModTime
SortByObjectTypeDescending SortByField = SortByField(descendingSort) + SortByObjectType
SortBySizeDescending SortByField = SortByField(descendingSort) + SortBySize
)
var (
// Unsorted disables sorting and can therefore not be combined with other values.
Unsorted = []SortByField{"none"}
// DefaultSorted does not specify how to sort and
// therefore implies the default sort order.
DefaultSorted = []SortByField{}
)
// CopyOrMoveOperationType represents the possible types of copy- and move-operations.
type CopyOrMoveOperationType int
// possible values for CopyOrMoveOperationType
const (
MoveOriginal CopyOrMoveOperationType = iota
CopyOriginal
CopyOriginalPreserveModTime
)
// OnExistAction represents possible actions the API should take,
// when a request tries to create a path that already exists.
type OnExistAction string
// possible values for OnExistAction
const (
// IgnoreOnExist instructs the API not to execute
// the request in case of a conflict, but to return an error.
IgnoreOnExist OnExistAction = "ignore"
// AutoNameOnExist instructs the API to automatically rename
// any conflicting request-objects.
AutoNameOnExist OnExistAction = "autoname"
// OverwriteOnExist instructs the API to overwrite any conflicting files.
// This can only be used, if the request operates on files directly.
// (For example when moving/copying a file.)
// For most requests this action will simply be ignored.
OverwriteOnExist OnExistAction = "overwrite"
)
// shouldRetry returns a boolean as to whether this resp and err deserve to be retried.
// It tries to expire/invalidate the token, if necessary.
// It returns the err as a convenience.
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if resp != nil && (resp.StatusCode == 401 || isHTTPError(err, 401)) && len(resp.Header["Www-Authenticate"]) > 0 {
fs.Debugf(f, "Token might be invalid: %v", err)
if f.tokenRenewer != nil {
iErr := f.tokenRenewer.Expire()
if iErr == nil {
return true, err
}
}
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// resolvePath resolves the given (relative) path and
// returns a path suitable for API-calls.
// This will consider the root-path of the fs and any needed prefixes.
//
// Any relative paths passed to functions that access these paths should
// be resolved with this first!
func (f *Fs) resolvePath(objectPath string) string {
resolved := path.Join(f.opt.RootPrefix, f.root, f.opt.Enc.FromStandardPath(objectPath))
return resolved
}
// iterateOverDirectory calls the given function callback
// on each item found in a given directory.
//
// If callback ever returns true then this exits early with found = true.
func (f *Fs) iterateOverDirectory(ctx context.Context, directory string, searchOnly MemberType, callback func(*api.HiDriveObject) bool, fields []string, sortBy []SortByField) (found bool, err error) {
parameters := api.NewQueryParameters()
parameters.SetPath(directory)
parameters.AddFields("members.", fields...)
parameters.AddFields("", api.DirectoryContentFields...)
parameters.Set("members", string(searchOnly))
for _, v := range sortBy {
// The explicit conversion is necessary for each element.
parameters.AddList("sort", ",", string(v))
}
opts := rest.Opts{
Method: "GET",
Path: "/dir",
Parameters: parameters.Values,
}
iterateContent := func(result *api.DirectoryContent, err error) (bool, error) {
if err != nil {
return false, err
}
for _, item := range result.Entries {
item.Name = f.opt.Enc.ToStandardName(item.Name)
if callback(&item) {
return true, nil
}
}
return false, nil
}
return f.paginateDirectoryAccess(ctx, &opts, iterationChunkSize, 0, iterateContent)
}
// paginateDirectoryAccess executes requests specified via ctx and opts
// which should produce api.DirectoryContent.
// This will paginate the requests using limit starting at the given offset.
//
// The given function callback is called on each api.DirectoryContent found
// along with any errors that occurred.
// If callback ever returns true then this exits early with found = true.
// If callback ever returns an error then this exits early with that error.
func (f *Fs) paginateDirectoryAccess(ctx context.Context, opts *rest.Opts, limit int64, offset int64, callback func(*api.DirectoryContent, error) (bool, error)) (found bool, err error) {
for {
opts.Parameters.Set("limit", strconv.FormatInt(offset, 10)+","+strconv.FormatInt(limit, 10))
var result api.DirectoryContent
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
found, err = callback(&result, err)
if found || err != nil {
return found, err
}
offset += int64(len(result.Entries))
if offset >= result.TotalCount || limit > int64(len(result.Entries)) {
break
}
}
return false, nil
}
// fetchMetadataForPath reads the metadata from the path.
func (f *Fs) fetchMetadataForPath(ctx context.Context, path string, fields []string) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetPath(path)
parameters.AddFields("", fields...)
opts := rest.Opts{
Method: "GET",
Path: "/meta",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
return &result, nil
}
// copyOrMove copies or moves a directory or file
// from the source-path to the destination-path.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: Use the explicit methods instead of directly invoking this method.
// (Those are: copyDirectory, moveDirectory, copyFile, moveFile.)
func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType CopyOrMoveOperationType, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.Set("src", source)
parameters.Set("dst", destination)
if onExist == AutoNameOnExist ||
(onExist == OverwriteOnExist && !isDirectory) {
parameters.Set("on_exist", string(onExist))
}
endpoint := "/"
if isDirectory {
endpoint += "dir"
} else {
endpoint += "file"
}
switch operationType {
case MoveOriginal:
endpoint += "/move"
case CopyOriginalPreserveModTime:
parameters.Set("preserve_mtime", strconv.FormatBool(true))
fallthrough
case CopyOriginal:
endpoint += "/copy"
}
opts := rest.Opts{
Method: "POST",
Path: endpoint,
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
return &result, nil
}
// copyDirectory copies the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
}
// moveDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) moveDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, true, MoveOriginal, source, destination, onExist)
}
// copyFile copies the file at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: This operation will expand sparse areas in the content of the source-file
// to blocks of 0-bytes in the destination-file.
func (f *Fs) copyFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, false, CopyOriginalPreserveModTime, source, destination, onExist)
}
// moveFile moves the file at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: This operation may expand sparse areas in the content of the source-file
// to blocks of 0-bytes in the destination-file.
func (f *Fs) moveFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, false, MoveOriginal, source, destination, onExist)
}
// createDirectory creates the directory at the given path and
// returns the resulting api-object if successful.
//
// The directory will only be created if its parent-directory exists.
// This returns fs.ErrorDirNotFound if the parent-directory is not found.
// This returns fs.ErrorDirExists if the directory already exists.
func (f *Fs) createDirectory(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetPath(directory)
if onExist == AutoNameOnExist {
parameters.Set("on_exist", string(onExist))
}
opts := rest.Opts{
Method: "POST",
Path: "/dir",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
switch {
case err == nil:
return &result, nil
case isHTTPError(err, 404):
return nil, fs.ErrorDirNotFound
case isHTTPError(err, 409):
return nil, fs.ErrorDirExists
}
return nil, err
}
// createDirectories creates the directory at the given path
// along with any missing parent directories and
// returns the resulting api-object (of the created directory) if successful.
//
// This returns fs.ErrorDirExists if the directory already exists.
//
// If an error occurs while the parent directories are being created,
// any directories already created will NOT be deleted again.
func (f *Fs) createDirectories(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
result, err := f.createDirectory(ctx, directory, onExist)
if err == nil {
return result, nil
}
if err != fs.ErrorDirNotFound {
return nil, err
}
parentDirectory := path.Dir(directory)
_, err = f.createDirectories(ctx, parentDirectory, onExist)
if err != nil && err != fs.ErrorDirExists {
return nil, err
}
// NOTE: Ignoring fs.ErrorDirExists does no harm,
// since it does not mean the child directory cannot be created.
return f.createDirectory(ctx, directory, onExist)
}
// deleteDirectory deletes the directory at the given path.
//
// If recursive is false, the directory will only be deleted if it is empty.
// If recursive is true, the directory will be deleted regardless of its content.
// This returns fs.ErrorDirNotFound if the directory is not found.
// This returns fs.ErrorDirectoryNotEmpty if the directory is not empty and
// recursive is false.
func (f *Fs) deleteDirectory(ctx context.Context, directory string, recursive bool) error {
parameters := api.NewQueryParameters()
parameters.SetPath(directory)
parameters.Set("recursive", strconv.FormatBool(recursive))
opts := rest.Opts{
Method: "DELETE",
Path: "/dir",
Parameters: parameters.Values,
NoResponse: true,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
})
switch {
case isHTTPError(err, 404):
return fs.ErrorDirNotFound
case isHTTPError(err, 409):
return fs.ErrorDirectoryNotEmpty
}
return err
}
// deleteObject deletes the object/file at the given path.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) deleteObject(ctx context.Context, path string) error {
parameters := api.NewQueryParameters()
parameters.SetPath(path)
opts := rest.Opts{
Method: "DELETE",
Path: "/file",
Parameters: parameters.Values,
NoResponse: true,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
})
if isHTTPError(err, 404) {
return fs.ErrorObjectNotFound
}
return err
}
// createFile creates a file at the given path
// with the content of the io.ReadSeeker.
// This guarantees that existing files will not be overwritten.
// The maximum size of the content is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// This returns fs.ErrorDirNotFound
// if the parent directory of the file is not found.
// This returns ErrorFileExists if a file already exists at the specified path.
func (f *Fs) createFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time, onExist OnExistAction) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetFileInDirectory(path)
if onExist == AutoNameOnExist {
parameters.Set("on_exist", string(onExist))
}
var err error
if !modTime.IsZero() {
err = parameters.SetTime("mtime", modTime)
if err != nil {
return nil, err
}
}
opts := rest.Opts{
Method: "POST",
Path: "/file",
Body: content,
ContentType: "application/octet-stream",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
// Reset the reading index (in case this is a retry).
if _, err = content.Seek(0, io.SeekStart); err != nil {
return false, err
}
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
switch {
case err == nil:
return &result, nil
case isHTTPError(err, 404):
return nil, fs.ErrorDirNotFound
case isHTTPError(err, 409):
return nil, ErrorFileExists
}
return nil, err
}
// overwriteFile updates the content of the file at the given path
// with the content of the io.ReadSeeker.
// If the file does not exist it will be created.
// The maximum size of the content is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// This returns fs.ErrorDirNotFound
// if the parent directory of the file is not found.
func (f *Fs) overwriteFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetFileInDirectory(path)
var err error
if !modTime.IsZero() {
err = parameters.SetTime("mtime", modTime)
if err != nil {
return nil, err
}
}
opts := rest.Opts{
Method: "PUT",
Path: "/file",
Body: content,
ContentType: "application/octet-stream",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
// Reset the reading index (in case this is a retry).
if _, err = content.Seek(0, io.SeekStart); err != nil {
return false, err
}
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
switch {
case err == nil:
return &result, nil
case isHTTPError(err, 404):
return nil, fs.ErrorDirNotFound
}
return nil, err
}
// uploadFileChunked updates the content of the existing file at the given path
// with the content of the io.Reader.
// Returns the position of the last successfully written byte, stopping before the first failed write.
// If nothing was written this will be 0.
// Returns the resulting api-object if successful.
//
// Replaces the file contents by uploading multiple chunks of the given size in parallel.
// Therefore this can be used to upload files of any size efficiently.
// The number of parallel transfers is limited by transferLimit, which should be larger than 0.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: This method uses updateFileChunked and may create sparse files,
// if the upload of a chunk fails unexpectedly.
// See note about sparse files in patchFile.
// If any of the uploads fail, the process will be aborted and
// the first error that occurred will be returned.
// This is not an atomic operation,
// therefore if the upload fails the file may be partially modified.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) uploadFileChunked(ctx context.Context, path string, content io.Reader, modTime time.Time, chunkSize int, transferLimit int64) (okSize uint64, info *api.HiDriveObject, err error) {
okSize, err = f.updateFileChunked(ctx, path, content, 0, chunkSize, transferLimit)
if err == nil {
info, err = f.resizeFile(ctx, path, okSize, modTime)
}
return okSize, info, err
}
// updateFileChunked updates the content of the existing file at the given path
// starting at the given offset.
// Returns the position of the last successfully written byte, stopping before the first failed write.
// If nothing was written this will be 0.
//
// Replaces the file contents starting from the given byte offset
// with the content of the io.Reader.
// If the offset is beyond the file end, the file is extended up to the offset.
//
// The upload is done in multiple chunks of the given size in parallel.
// Therefore this can be used to upload files of any size efficiently.
// The number of parallel transfers is limited by transferLimit, which should be larger than 0.
//
// NOTE: Because it is inefficient to set the modification time with every chunk,
// setting it to a specific value must be done in a separate request
// after this operation finishes.
//
// NOTE: This method uses patchFile and may create sparse files,
// especially if the upload of a chunk fails unexpectedly.
// See note about sparse files in patchFile.
// If any of the uploads fail, the process will be aborted and
// the first error that occurred will be returned.
// This is not an atomic operation,
// therefore if the upload fails the file may be partially modified.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) updateFileChunked(ctx context.Context, path string, content io.Reader, offset uint64, chunkSize int, transferLimit int64) (okSize uint64, err error) {
var (
okChunksMu sync.Mutex // protects the variables below
okChunks []ranges.Range
)
g, gCtx := errgroup.WithContext(ctx)
transferSemaphore := semaphore.NewWeighted(transferLimit)
var readErr error
startMoreTransfers := true
zeroTime := time.Time{}
for chunk := uint64(0); startMoreTransfers; chunk++ {
// Acquire semaphore to limit number of transfers in parallel.
readErr = transferSemaphore.Acquire(gCtx, 1)
if readErr != nil {
break
}
// Read a chunk of data.
chunkReader, bytesRead, readErr := readerForChunk(content, chunkSize)
if bytesRead < chunkSize {
startMoreTransfers = false
}
if readErr != nil || bytesRead <= 0 {
break
}
// Transfer the chunk.
chunkOffset := uint64(chunkSize)*chunk + offset
g.Go(func() error {
// After this upload is done,
// signal that another transfer can be started.
defer transferSemaphore.Release(1)
uploadErr := f.patchFile(gCtx, path, cachedReader(chunkReader), chunkOffset, zeroTime)
if uploadErr == nil {
// Remember successfully written chunks.
okChunksMu.Lock()
okChunks = append(okChunks, ranges.Range{Pos: int64(chunkOffset), Size: int64(bytesRead)})
okChunksMu.Unlock()
fs.Debugf(f, "Done uploading chunk of size %v at offset %v.", bytesRead, chunkOffset)
} else {
fs.Infof(f, "Error while uploading chunk at offset %v. Error is %v.", chunkOffset, uploadErr)
}
return uploadErr
})
}
if readErr != nil {
// Log the error in case it is later ignored because of an upload-error.
fs.Infof(f, "Error while reading/preparing to upload a chunk. Error is %v.", readErr)
}
err = g.Wait()
// Compute the first continuous range of the file content,
// which does not contain any failed chunks.
// Do not forget to add the file content up to the starting offset,
// which is presumed to be already correct.
rs := ranges.Ranges{}
rs.Insert(ranges.Range{Pos: 0, Size: int64(offset)})
for _, chunkRange := range okChunks {
rs.Insert(chunkRange)
}
if len(rs) > 0 && rs[0].Pos == 0 {
okSize = uint64(rs[0].Size)
}
if err != nil {
return okSize, err
}
if readErr != nil {
return okSize, readErr
}
return okSize, nil
}
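The comments above describe bounded parallel chunk uploads. Here is a stripped-down sketch of the same errgroup-plus-weighted-semaphore pattern with the actual upload replaced by a stub; the chunk count and limit are arbitrary values chosen for the example:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

func main() {
	g, gCtx := errgroup.WithContext(context.Background())
	sem := semaphore.NewWeighted(3) // like transferLimit: at most 3 chunks in flight

	for chunk := 0; chunk < 10; chunk++ {
		// Block until a transfer slot is free (or the group was cancelled).
		if err := sem.Acquire(gCtx, 1); err != nil {
			break
		}
		chunk := chunk
		g.Go(func() error {
			defer sem.Release(1) // free the slot for the next chunk
			fmt.Println("uploading chunk", chunk)
			return nil // a real uploader would return its error here
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("upload aborted:", err)
	}
}
```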
// patchFile updates the content of the existing file at the given path
// starting at the given offset.
//
// Replaces the file contents starting from the given byte offset
// with the content of the io.ReadSeeker.
// If the offset is beyond the file end, the file is extended up to the offset.
// The maximum size of the update is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: By extending the file up to the offset this may create sparse files,
// which allocate less space on the file system than their apparent size indicates,
// since holes between data chunks are "real" holes
// and not regions made up of consecutive 0-bytes.
// Subsequent operations (such as copying data)
// usually expand the holes into regions of 0-bytes.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) patchFile(ctx context.Context, path string, content io.ReadSeeker, offset uint64, modTime time.Time) error {
parameters := api.NewQueryParameters()
parameters.SetPath(path)
parameters.Set("offset", strconv.FormatUint(offset, 10))
if !modTime.IsZero() {
err := parameters.SetTime("mtime", modTime)
if err != nil {
return err
}
}
opts := rest.Opts{
Method: "PATCH",
Path: "/file",
Body: content,
ContentType: "application/octet-stream",
Parameters: parameters.Values,
NoResponse: true,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
// Reset the reading index (in case this is a retry).
_, err = content.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
resp, err = f.srv.Call(ctx, &opts)
if isHTTPError(err, 423) {
return true, err
}
return f.shouldRetry(ctx, resp, err)
})
if isHTTPError(err, 404) {
return fs.ErrorObjectNotFound
}
return err
}
// resizeFile updates the existing file at the given path to be of the given size
// and returns the resulting api-object if successful.
//
// If the given size is smaller than the current filesize,
// the file is cut/truncated at that position.
// If the given size is larger, the file is extended up to that position.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: By extending the file this may create sparse files,
// which allocate less space on the file system than their apparent size indicates,
// since holes between data chunks are "real" holes
// and not regions made up of consecutive 0-bytes.
// Subsequent operations (such as copying data)
// usually expand the holes into regions of 0-bytes.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) resizeFile(ctx context.Context, path string, size uint64, modTime time.Time) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetPath(path)
parameters.Set("size", strconv.FormatUint(size, 10))
if !modTime.IsZero() {
err := parameters.SetTime("mtime", modTime)
if err != nil {
return nil, err
}
}
opts := rest.Opts{
Method: "POST",
Path: "/file/truncate",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
switch {
case err == nil:
return &result, nil
case isHTTPError(err, 404):
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
// ------------------------------------------------------------
// isHTTPError compares the numerical status code
// of an api.Error to the given HTTP status.
//
// If the given error is not an api.Error or
// a numerical status code could not be determined, this returns false.
// Otherwise this returns whether the status code of the error is equal to the given status.
func isHTTPError(err error, status int64) bool {
if apiErr, ok := err.(*api.Error); ok {
errStatus, decodeErr := apiErr.Code.Int64()
if decodeErr == nil && errStatus == status {
return true
}
}
return false
}
// createHiDriveScopes creates oauth-scopes
// from the given user-role and access-permissions.
//
// If the arguments are empty, they will not be included in the result.
func createHiDriveScopes(role string, access string) []string {
switch {
case role != "" && access != "":
return []string{access + "," + role}
case role != "":
return []string{role}
case access != "":
return []string{access}
}
return []string{}
}
// cachedReader returns a version of the reader that caches its contents and
// can therefore be reset using Seek.
func cachedReader(reader io.Reader) io.ReadSeeker {
bytesReader, ok := reader.(*bytes.Reader)
if ok {
return bytesReader
}
repeatableReader, ok := reader.(*readers.RepeatableReader)
if ok {
return repeatableReader
}
return readers.NewRepeatableReader(reader)
}
// readerForChunk reads a chunk of bytes from reader (after handling any accounting).
// Returns a new io.Reader (chunkReader) for that chunk
// and the number of bytes that have been read from reader.
func readerForChunk(reader io.Reader, length int) (chunkReader io.Reader, bytesRead int, err error) {
// Unwrap any accounting from the input if present.
reader, wrap := accounting.UnWrap(reader)
// Read a chunk of data.
buffer := make([]byte, length)
bytesRead, err = io.ReadFull(reader, buffer)
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = nil
}
if err != nil {
return nil, bytesRead, err
}
// Truncate unused capacity.
buffer = buffer[:bytesRead]
// Use wrap to put any accounting back for chunkReader.
return wrap(bytes.NewReader(buffer)), bytesRead, nil
}
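readerForChunk treats EOF and a short final read as success rather than as errors. A self-contained sketch of that logic without the accounting unwrap; the helper name readChunk is made up for this example:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// readChunk mirrors the core of readerForChunk above: read up to length
// bytes and treat EOF / a short final read as a normal end of input.
func readChunk(r io.Reader, length int) ([]byte, error) {
	buf := make([]byte, length)
	n, err := io.ReadFull(r, buf)
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		err = nil
	}
	return buf[:n], err
}

func main() {
	src := bytes.NewReader([]byte("hello world"))
	for {
		chunk, err := readChunk(src, 4)
		if err != nil || len(chunk) == 0 {
			break
		}
		fmt.Printf("%q\n", chunk) // "hell", "o wo", "rld"
	}
}
```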

backend/hidrive/hidrive.go Normal file

File diff suppressed because it is too large


@@ -0,0 +1,45 @@
// Test HiDrive filesystem interface
package hidrive
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote.
func TestIntegration(t *testing.T) {
name := "TestHiDrive"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: 1,
MaxChunkSize: MaximumUploadBytes,
CeilChunkSize: nil,
NeedMultipleChunks: false,
},
})
}
// Change the configured UploadChunkSize.
// Will only be called while no transfer is in progress.
func (f *Fs) SetUploadChunkSize(chunksize fs.SizeSuffix) (fs.SizeSuffix, error) {
var old fs.SizeSuffix
old, f.opt.UploadChunkSize = f.opt.UploadChunkSize, chunksize
return old, nil
}
// Change the configured UploadCutoff.
// Will only be called while no transfer is in progress.
func (f *Fs) SetUploadCutoff(cutoff fs.SizeSuffix) (fs.SizeSuffix, error) {
var old fs.SizeSuffix
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cutoff
return old, nil
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)


@@ -0,0 +1,410 @@
// Package hidrivehash implements the HiDrive hashing algorithm which combines SHA-1 hashes hierarchically to a single top-level hash.
//
// Note: This implementation does not grant access to any partial hashes generated.
//
// See: https://developer.hidrive.com/wp-content/uploads/2021/07/HiDrive_Synchronization-v3.3-rev28.pdf
// (link to newest version: https://static.hidrive.com/dev/0001)
package hidrivehash
import (
"bytes"
"crypto/sha1"
"encoding"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
"github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
)
const (
// BlockSize of the checksum in bytes.
BlockSize = 4096
// Size of the checksum in bytes.
Size = sha1.Size
// sumsPerLevel is the number of checksums a single level aggregates before it is full.
sumsPerLevel = 256
)
var (
// zeroSum is a special hash consisting of 20 null-bytes.
// This will be the hash of any empty file (or ones containing only null-bytes).
zeroSum = [Size]byte{}
// ErrorInvalidEncoding is returned when a hash should be decoded from a binary form that is invalid.
ErrorInvalidEncoding = errors.New("encoded binary form is invalid for this hash")
// ErrorHashFull is returned when a hash reached its capacity and cannot accept any more input.
ErrorHashFull = errors.New("hash reached its capacity")
)
// writeByBlock writes len(p) bytes from p to the io.Writer in blocks of size blockSize.
// It returns the number of bytes written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
//
// A pointer to a counter, bytesInBlock, needs to be supplied;
// it is used to keep track of how many bytes have already been written to the writer.
// A pointer to a boolean, onlyNullBytesInBlock, needs to be supplied;
// it is used to keep track of whether the block written so far consists only of null-bytes.
// The callback onBlockWritten is called whenever a full block has been written to the writer
// and is given as input the number of bytes that still need to be written.
func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *uint32, onlyNullBytesInBlock *bool, onBlockWritten func(remaining int) error) (n int, err error) {
total := len(p)
nullBytes := make([]byte, blockSize)
for len(p) > 0 {
toWrite := int(blockSize - *bytesInBlock)
if toWrite > len(p) {
toWrite = len(p)
}
c, err := writer.Write(p[:toWrite])
*bytesInBlock += uint32(c)
*onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite])
// Discard data written through a reslice
p = p[c:]
if err != nil {
return total - len(p), err
}
if *bytesInBlock == blockSize {
err = onBlockWritten(len(p))
if err != nil {
return total - len(p), err
}
*bytesInBlock = 0
*onlyNullBytesInBlock = true
}
}
return total, nil
}
// level is a hash.Hash that is used to aggregate the checksums produced by the level hierarchically beneath it.
// It is used to represent any level-n hash, except for level-0.
type level struct {
checksum [Size]byte // aggregated checksum of this level
sumCount uint32 // number of sums contained in this level so far
bytesInHasher uint32 // number of bytes written into hasher so far
onlyNullBytesInHasher bool // whether the hasher only contains null-bytes so far
hasher hash.Hash
}
// NewLevel returns a new hash.Hash computing any level-n hash, except level-0.
func NewLevel() hash.Hash {
l := &level{}
l.Reset()
return l
}
// Add takes a position-embedded SHA-1 checksum and adds it to the level.
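// The addition is a byte-wise big-endian sum with carry, i.e. the two
// 20-byte values are added as unsigned integers modulo 2^160.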
func (l *level) Add(sha1sum []byte) {
var tmp uint
var carry bool
for i := Size - 1; i >= 0; i-- {
tmp = uint(sha1sum[i]) + uint(l.checksum[i])
if carry {
tmp++
}
carry = tmp > 255
l.checksum[i] = byte(tmp)
}
}
// IsFull returns whether the number of checksums added to this level reached its capacity.
func (l *level) IsFull() bool {
return l.sumCount >= sumsPerLevel
}
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// Contrary to the specification from hash.Hash, this DOES return an error,
// specifically ErrorHashFull if and only if IsFull() returns true.
func (l *level) Write(p []byte) (n int, err error) {
if l.IsFull() {
return 0, ErrorHashFull
}
onBlockWritten := func(remaining int) error {
if !l.onlyNullBytesInHasher {
c, err := l.hasher.Write([]byte{byte(l.sumCount)})
l.bytesInHasher += uint32(c)
if err != nil {
return err
}
l.Add(l.hasher.Sum(nil))
}
l.sumCount++
l.hasher.Reset()
if remaining > 0 && l.IsFull() {
return ErrorHashFull
}
return nil
}
return writeByBlock(p, l.hasher, uint32(l.BlockSize()), &l.bytesInHasher, &l.onlyNullBytesInHasher, onBlockWritten)
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (l *level) Sum(b []byte) []byte {
return append(b, l.checksum[:]...)
}
// Reset resets the Hash to its initial state.
func (l *level) Reset() {
l.checksum = zeroSum // clear the current checksum
l.sumCount = 0
l.bytesInHasher = 0
l.onlyNullBytesInHasher = true
l.hasher = sha1.New()
}
// Size returns the number of bytes Sum will return.
func (l *level) Size() int {
return Size
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (l *level) BlockSize() int {
return Size
}
// MarshalBinary encodes the hash into a binary form and returns the result.
func (l *level) MarshalBinary() ([]byte, error) {
b := make([]byte, Size+4+4+1)
copy(b, l.checksum[:])
binary.BigEndian.PutUint32(b[Size:], l.sumCount)
binary.BigEndian.PutUint32(b[Size+4:], l.bytesInHasher)
if l.onlyNullBytesInHasher {
b[Size+4+4] = 1
}
encodedHasher, err := l.hasher.(encoding.BinaryMarshaler).MarshalBinary()
if err != nil {
return nil, err
}
b = append(b, encodedHasher...)
return b, nil
}
// UnmarshalBinary decodes the binary form generated by MarshalBinary.
// The hash will replace its internal state accordingly.
func (l *level) UnmarshalBinary(b []byte) error {
if len(b) < Size+4+4+1 {
return ErrorInvalidEncoding
}
copy(l.checksum[:], b)
l.sumCount = binary.BigEndian.Uint32(b[Size:])
l.bytesInHasher = binary.BigEndian.Uint32(b[Size+4:])
switch b[Size+4+4] {
case 0:
l.onlyNullBytesInHasher = false
case 1:
l.onlyNullBytesInHasher = true
default:
return ErrorInvalidEncoding
}
err := l.hasher.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[Size+4+4+1:])
return err
}
// hidriveHash is the hash computing the actual checksum used by HiDrive by combining multiple level-hashes.
type hidriveHash struct {
levels []*level // collection of level-hashes, one for each level starting at level-1
lastSumWritten [Size]byte // the last checksum written to any of the levels
bytesInBlock uint32 // bytes written into blockHash so far
onlyNullBytesInBlock bool // whether the hasher only contains null-bytes so far
blockHash hash.Hash
}
// New returns a new hash.Hash computing the HiDrive checksum.
func New() hash.Hash {
h := &hidriveHash{}
h.Reset()
return h
}
// aggregateToLevel writes the checksum to the level at the given index
// and if necessary propagates any changes to levels above.
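// When a level becomes full its aggregated sum is written into the next
// level and the full level is reset, similar to carrying in positional
// arithmetic.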
func (h *hidriveHash) aggregateToLevel(index int, sum []byte) {
for i := index; ; i++ {
if i >= len(h.levels) {
h.levels = append(h.levels, NewLevel().(*level))
}
_, err := h.levels[i].Write(sum)
copy(h.lastSumWritten[:], sum)
if err != nil {
panic(fmt.Errorf("level-hash should not have produced an error: %w", err))
}
if !h.levels[i].IsFull() {
break
}
sum = h.levels[i].Sum(nil)
h.levels[i].Reset()
}
}
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
func (h *hidriveHash) Write(p []byte) (n int, err error) {
onBlockWritten := func(remaining int) error {
var sum []byte
if h.onlyNullBytesInBlock {
sum = zeroSum[:]
} else {
sum = h.blockHash.Sum(nil)
}
h.blockHash.Reset()
h.aggregateToLevel(0, sum)
return nil
}
return writeByBlock(p, h.blockHash, uint32(BlockSize), &h.bytesInBlock, &h.onlyNullBytesInBlock, onBlockWritten)
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (h *hidriveHash) Sum(b []byte) []byte {
// Save internal state.
state, err := h.MarshalBinary()
if err != nil {
panic(fmt.Errorf("saving the internal state should not have produced an error: %w", err))
}
if h.bytesInBlock > 0 {
// Fill remainder of block with null-bytes.
filler := make([]byte, h.BlockSize()-int(h.bytesInBlock))
_, err = h.Write(filler)
if err != nil {
panic(fmt.Errorf("filling with null-bytes should not have an error: %w", err))
}
}
checksum := zeroSum
for i := 0; i < len(h.levels); i++ {
level := h.levels[i]
if i < len(h.levels)-1 {
// Aggregate non-empty non-final levels.
if level.sumCount >= 1 {
h.aggregateToLevel(i+1, level.Sum(nil))
level.Reset()
}
} else {
// Determine sum of final level.
if level.sumCount > 1 {
copy(checksum[:], level.Sum(nil))
} else {
// This is needed, otherwise there is no way to return
// the non-position-embedded checksum.
checksum = h.lastSumWritten
}
}
}
// Restore internal state.
err = h.UnmarshalBinary(state)
if err != nil {
panic(fmt.Errorf("restoring the internal state should not have produced an error: %w", err))
}
return append(b, checksum[:]...)
}
// Reset resets the Hash to its initial state.
func (h *hidriveHash) Reset() {
h.levels = nil
h.lastSumWritten = zeroSum // clear the last written checksum
h.bytesInBlock = 0
h.onlyNullBytesInBlock = true
h.blockHash = sha1.New()
}
// Size returns the number of bytes Sum will return.
func (h *hidriveHash) Size() int {
return Size
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (h *hidriveHash) BlockSize() int {
return BlockSize
}
// MarshalBinary encodes the hash into a binary form and returns the result.
func (h *hidriveHash) MarshalBinary() ([]byte, error) {
b := make([]byte, Size+4+1+8)
copy(b, h.lastSumWritten[:])
binary.BigEndian.PutUint32(b[Size:], h.bytesInBlock)
if h.onlyNullBytesInBlock {
b[Size+4] = 1
}
binary.BigEndian.PutUint64(b[Size+4+1:], uint64(len(h.levels)))
for _, level := range h.levels {
encodedLevel, err := level.MarshalBinary()
if err != nil {
return nil, err
}
encodedLength := make([]byte, 8)
binary.BigEndian.PutUint64(encodedLength, uint64(len(encodedLevel)))
b = append(b, encodedLength...)
b = append(b, encodedLevel...)
}
encodedBlockHash, err := h.blockHash.(encoding.BinaryMarshaler).MarshalBinary()
if err != nil {
return nil, err
}
b = append(b, encodedBlockHash...)
return b, nil
}
// UnmarshalBinary decodes the binary form generated by MarshalBinary.
// The hash will replace its internal state accordingly.
func (h *hidriveHash) UnmarshalBinary(b []byte) error {
if len(b) < Size+4+1+8 {
return ErrorInvalidEncoding
}
copy(h.lastSumWritten[:], b)
h.bytesInBlock = binary.BigEndian.Uint32(b[Size:])
switch b[Size+4] {
case 0:
h.onlyNullBytesInBlock = false
case 1:
h.onlyNullBytesInBlock = true
default:
return ErrorInvalidEncoding
}
amount := binary.BigEndian.Uint64(b[Size+4+1:])
h.levels = make([]*level, int(amount))
offset := Size + 4 + 1 + 8
for i := range h.levels {
length := int(binary.BigEndian.Uint64(b[offset:]))
offset += 8
h.levels[i] = NewLevel().(*level)
err := h.levels[i].UnmarshalBinary(b[offset : offset+length])
if err != nil {
return err
}
offset += length
}
err := h.blockHash.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[offset:])
return err
}
// Sum returns the HiDrive checksum of the data.
func Sum(data []byte) [Size]byte {
h := New().(*hidriveHash)
_, _ = h.Write(data)
var result [Size]byte
copy(result[:], h.Sum(nil))
return result
}
// Check the interfaces are satisfied.
var (
_ hash.Hash = (*level)(nil)
_ encoding.BinaryMarshaler = (*level)(nil)
_ encoding.BinaryUnmarshaler = (*level)(nil)
_ internal.LevelHash = (*level)(nil)
_ hash.Hash = (*hidriveHash)(nil)
_ encoding.BinaryMarshaler = (*hidriveHash)(nil)
_ encoding.BinaryUnmarshaler = (*hidriveHash)(nil)
)
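// Usage example (editor's sketch, not part of the original source): the
// package is used like any other hash.Hash, or via the one-shot Sum helper.
// reader stands for any io.Reader supplying the file contents.
//
//	h := hidrivehash.New()
//	if _, err := io.Copy(h, reader); err != nil {
//		return err
//	}
//	fmt.Printf("%x\n", h.Sum(nil)) // top-level HiDrive checksum
//
//	sum := hidrivehash.Sum([]byte("hello rclone\n")) // one-shot helper
//	fmt.Printf("%x\n", sum)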


@@ -0,0 +1,395 @@
package hidrivehash_test
import (
"crypto/sha1"
"encoding"
"encoding/hex"
"fmt"
"io"
"testing"
"github.com/rclone/rclone/backend/hidrive/hidrivehash"
"github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
"github.com/stretchr/testify/assert"
)
// helper functions to set up test-tables
func sha1ArrayAsSlice(sum [sha1.Size]byte) []byte {
return sum[:]
}
func mustDecode(hexstring string) []byte {
result, err := hex.DecodeString(hexstring)
if err != nil {
panic(err)
}
return result
}
// ------------------------------------------------------------
var testTableLevelPositionEmbedded = []struct {
ins [][]byte
outs [][]byte
name string
}{
{
[][]byte{
sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}),
sha1ArrayAsSlice([20]byte{78, 188, 156, 219, 173, 54, 81, 55, 47, 220, 222, 207, 201, 21, 57, 252, 255, 239, 251, 186}),
},
[][]byte{
sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}),
sha1ArrayAsSlice([20]byte{68, 135, 96, 187, 38, 253, 14, 167, 186, 167, 188, 210, 91, 177, 185, 13, 208, 217, 94, 18}),
},
"documentation-v3.2rev27-example L0 (position-embedded)",
},
{
[][]byte{
sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}),
sha1ArrayAsSlice([20]byte{75, 211, 153, 190, 125, 179, 67, 49, 60, 149, 98, 246, 142, 20, 11, 254, 159, 162, 129, 237}),
sha1ArrayAsSlice([20]byte{150, 2, 9, 153, 97, 153, 189, 104, 147, 14, 77, 203, 244, 243, 25, 212, 67, 48, 111, 107}),
},
[][]byte{
sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}),
sha1ArrayAsSlice([20]byte{144, 209, 246, 100, 177, 216, 171, 229, 83, 17, 92, 135, 68, 98, 76, 72, 217, 24, 99, 176}),
sha1ArrayAsSlice([20]byte{38, 211, 255, 254, 19, 114, 105, 77, 230, 31, 170, 83, 57, 85, 102, 29, 28, 72, 211, 27}),
},
"documentation-example L0 (position-embedded)",
},
{
[][]byte{
sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}),
sha1ArrayAsSlice([20]byte{40, 34, 8, 238, 37, 5, 237, 184, 79, 105, 10, 167, 171, 254, 13, 229, 132, 112, 254, 8}),
sha1ArrayAsSlice([20]byte{39, 112, 26, 86, 190, 35, 100, 101, 28, 131, 122, 191, 254, 144, 239, 107, 253, 124, 104, 203}),
},
[][]byte{
sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}),
sha1ArrayAsSlice([20]byte{213, 157, 141, 227, 213, 178, 25, 111, 200, 145, 77, 164, 17, 247, 202, 167, 37, 46, 0, 124}),
sha1ArrayAsSlice([20]byte{253, 13, 168, 58, 147, 213, 125, 212, 229, 20, 200, 100, 16, 136, 186, 19, 34, 170, 105, 71}),
},
"documentation-example L1 (position-embedded)",
},
}
var testTableLevel = []struct {
ins [][]byte
outs [][]byte
name string
}{
{
[][]byte{
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
},
[][]byte{
mustDecode("44fe5ca6342568b4167bf990b64e404a3975e1c3"),
mustDecode("90d1f664b1d8abe553115c8744624c48d91863b0"),
mustDecode("26d3fffe1372694de61faa533955661d1c48d31b"),
},
"documentation-example L0",
},
{
[][]byte{
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"),
mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"),
},
[][]byte{
mustDecode("ad7b84f5b0ac2bb7792842fc65f9bcc1a0bd0274"),
mustDecode("d59d8de3d5b2196fc8914da411f7caa7252e007c"),
mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"),
},
"documentation-example L1",
},
{
[][]byte{
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"),
mustDecode("0000000000000000000000000000000000000000"),
},
[][]byte{
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"),
mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"),
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"),
mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"),
},
"mixed-with-empties",
},
}
var testTable = []struct {
data []byte
// pattern describes how to use data to construct the hash-input.
// For every entry n at even indices this repeats the data n times.
// For every entry m at odd indices this repeats a null-byte m times.
// The input-data is constructed by concatenating the results in order.
// (A worked example follows this table.)
pattern []int64
out []byte
name string
}{
{
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
[]int64{64},
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
"documentation-example L0",
},
{
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
[]int64{64 * 256},
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
"documentation-example L1",
},
{
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
[]int64{64 * 256, 0, 64 * 128, 4096 * 128, 64*2 + 32},
mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"),
"documentation-example L2",
},
{
[]byte("hello rclone\n"),
[]int64{316},
mustDecode("72370f9c18a2c20b31d71f3f4cee7a3cd2703737"),
"not-block-aligned",
},
{
[]byte("hello rclone\n"),
[]int64{13, 4096 * 3, 4},
mustDecode("a6990b81791f0d2db750b38f046df321c975aa60"),
"not-block-aligned-with-null-bytes",
},
{
[]byte{},
[]int64{},
mustDecode("0000000000000000000000000000000000000000"),
"empty",
},
{
[]byte{},
[]int64{0, 4096 * 256 * 256},
mustDecode("0000000000000000000000000000000000000000"),
"null-bytes",
},
}
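// Worked example for the pattern encoding above (editor's note): in the
// "not-block-aligned-with-null-bytes" case the data is "hello rclone\n"
// (13 bytes) and the pattern is {13, 4096 * 3, 4}, which expands to
//
//	13 repetitions of the data (169 bytes),
//	then 4096*3 null-bytes (12288 bytes),
//	then 4 more repetitions of the data (52 bytes),
//
// for a total of 12509 bytes of input.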
// ------------------------------------------------------------
func TestLevelAdd(t *testing.T) {
for _, test := range testTableLevelPositionEmbedded {
l := hidrivehash.NewLevel().(internal.LevelHash)
t.Run(test.name, func(t *testing.T) {
for i := range test.ins {
l.Add(test.ins[i])
assert.Equal(t, test.outs[i], l.Sum(nil))
}
})
}
}
func TestLevelWrite(t *testing.T) {
for _, test := range testTableLevel {
l := hidrivehash.NewLevel()
t.Run(test.name, func(t *testing.T) {
for i := range test.ins {
l.Write(test.ins[i])
assert.Equal(t, test.outs[i], l.Sum(nil))
}
})
}
}
func TestLevelIsFull(t *testing.T) {
content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
l := hidrivehash.NewLevel()
for i := 0; i < 256; i++ {
assert.False(t, l.(internal.LevelHash).IsFull())
written, err := l.Write(content[:])
assert.Equal(t, len(content), written)
if !assert.NoError(t, err) {
t.FailNow()
}
}
assert.True(t, l.(internal.LevelHash).IsFull())
written, err := l.Write(content[:])
assert.True(t, l.(internal.LevelHash).IsFull())
assert.Equal(t, 0, written)
assert.ErrorIs(t, err, hidrivehash.ErrorHashFull)
}
func TestLevelReset(t *testing.T) {
l := hidrivehash.NewLevel()
zeroHash := l.Sum(nil)
_, err := l.Write([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19})
if assert.NoError(t, err) {
assert.NotEqual(t, zeroHash, l.Sum(nil))
l.Reset()
assert.Equal(t, zeroHash, l.Sum(nil))
}
}
func TestLevelSize(t *testing.T) {
l := hidrivehash.NewLevel()
assert.Equal(t, 20, l.Size())
}
func TestLevelBlockSize(t *testing.T) {
l := hidrivehash.NewLevel()
assert.Equal(t, 20, l.BlockSize())
}
func TestLevelBinaryMarshaler(t *testing.T) {
content := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
l := hidrivehash.NewLevel().(internal.LevelHash)
l.Write(content[:10])
encoded, err := l.MarshalBinary()
if assert.NoError(t, err) {
d := hidrivehash.NewLevel().(internal.LevelHash)
err = d.UnmarshalBinary(encoded)
if assert.NoError(t, err) {
assert.Equal(t, l.Sum(nil), d.Sum(nil))
l.Write(content[10:])
d.Write(content[10:])
assert.Equal(t, l.Sum(nil), d.Sum(nil))
}
}
}
func TestLevelInvalidEncoding(t *testing.T) {
l := hidrivehash.NewLevel().(internal.LevelHash)
err := l.UnmarshalBinary([]byte{})
assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding)
}
// ------------------------------------------------------------
type infiniteReader struct {
source []byte
offset int
}
func (m *infiniteReader) Read(b []byte) (int, error) {
count := copy(b, m.source[m.offset:])
m.offset += count
m.offset %= len(m.source)
return count, nil
}
func writeInChunks(writer io.Writer, chunkSize int64, data []byte, pattern []int64) error {
readers := make([]io.Reader, len(pattern))
nullBytes := [4096]byte{}
for i, n := range pattern {
if i%2 == 0 {
readers[i] = io.LimitReader(&infiniteReader{data, 0}, n*int64(len(data)))
} else {
readers[i] = io.LimitReader(&infiniteReader{nullBytes[:], 0}, n)
}
}
reader := io.MultiReader(readers...)
for {
_, err := io.CopyN(writer, reader, chunkSize)
if err != nil {
if err == io.EOF {
err = nil
}
return err
}
}
}
func TestWrite(t *testing.T) {
for _, test := range testTable {
t.Run(test.name, func(t *testing.T) {
h := hidrivehash.New()
err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern)
if assert.NoError(t, err) {
normalSum := h.Sum(nil)
assert.Equal(t, test.out, normalSum)
// Test if different block-sizes produce differing results.
for _, blockSize := range []int64{397, 512, 4091, 8192, 10000} {
t.Run(fmt.Sprintf("block-size %v", blockSize), func(t *testing.T) {
h := hidrivehash.New()
err := writeInChunks(h, blockSize, test.data, test.pattern)
if assert.NoError(t, err) {
assert.Equal(t, normalSum, h.Sum(nil))
}
})
}
}
})
}
}
func TestReset(t *testing.T) {
h := hidrivehash.New()
zeroHash := h.Sum(nil)
_, err := h.Write([]byte{1})
if assert.NoError(t, err) {
assert.NotEqual(t, zeroHash, h.Sum(nil))
h.Reset()
assert.Equal(t, zeroHash, h.Sum(nil))
}
}
func TestSize(t *testing.T) {
h := hidrivehash.New()
assert.Equal(t, 20, h.Size())
}
func TestBlockSize(t *testing.T) {
h := hidrivehash.New()
assert.Equal(t, 4096, h.BlockSize())
}
func TestBinaryMarshaler(t *testing.T) {
for _, test := range testTable {
h := hidrivehash.New()
d := hidrivehash.New()
half := len(test.pattern) / 2
t.Run(test.name, func(t *testing.T) {
err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[:half])
assert.NoError(t, err)
encoded, err := h.(encoding.BinaryMarshaler).MarshalBinary()
if assert.NoError(t, err) {
err = d.(encoding.BinaryUnmarshaler).UnmarshalBinary(encoded)
if assert.NoError(t, err) {
assert.Equal(t, h.Sum(nil), d.Sum(nil))
err = writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[half:])
assert.NoError(t, err)
err = writeInChunks(d, int64(d.BlockSize()), test.data, test.pattern[half:])
assert.NoError(t, err)
assert.Equal(t, h.Sum(nil), d.Sum(nil))
}
}
})
}
}
func TestInvalidEncoding(t *testing.T) {
h := hidrivehash.New()
err := h.(encoding.BinaryUnmarshaler).UnmarshalBinary([]byte{})
assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding)
}
func TestSum(t *testing.T) {
assert.Equal(t, [hidrivehash.Size]byte{}, hidrivehash.Sum([]byte{}))
content := []byte{1}
h := hidrivehash.New()
h.Write(content)
sum := hidrivehash.Sum(content)
assert.Equal(t, h.Sum(nil), sum[:])
}


@@ -0,0 +1,18 @@
// Package internal provides utilities for HiDrive.
package internal
import (
"encoding"
"hash"
)
// LevelHash is an internal interface for level-hashes.
type LevelHash interface {
encoding.BinaryMarshaler
encoding.BinaryUnmarshaler
hash.Hash
// Add takes a position-embedded checksum and adds it to the level.
Add(sum []byte)
// IsFull returns whether the number of checksums added to this level reached its capacity.
IsFull() bool
}


@@ -13,7 +13,6 @@ import (
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"time"
@@ -35,11 +34,11 @@ var (
func init() {
fsi := &fs.RegInfo{
Name: "http",
Description: "http Connection",
Description: "HTTP",
NewFs: NewFs,
Options: []fs.Option{{
Name: "url",
Help: "URL of http host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
Required: true,
}, {
Name: "headers",
@@ -305,7 +304,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f,
remote: remote,
}
err := o.stat(ctx)
err := o.head(ctx)
if err != nil {
return nil, err
}
@@ -317,15 +316,6 @@ func (f *Fs) url(remote string) string {
return f.endpointURL + rest.URLPathEscape(remote)
}
// parse s into an int64, on failure return def
func parseInt64(s string, def int64) int64 {
n, e := strconv.ParseInt(s, 10, 64)
if e != nil {
return def
}
return n
}
// Errors returned by parseName
var (
errURLJoinFailed = errors.New("URLJoin failed")
@@ -500,7 +490,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs: f,
remote: remote,
}
switch err := file.stat(ctx); err {
switch err := file.head(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
@@ -579,8 +569,8 @@ func (o *Object) url() string {
return o.fs.url(o.remote)
}
// stat updates the info field in the Object
func (o *Object) stat(ctx context.Context) error {
// head sends a HEAD request to update info fields in the Object
func (o *Object) head(ctx context.Context) error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
@@ -601,13 +591,19 @@ func (o *Object) stat(ctx context.Context) error {
if err != nil {
return fmt.Errorf("failed to stat: %w", err)
}
return o.decodeMetadata(ctx, res)
}
// decodeMetadata updates info fields in the Object according to HTTP response headers
func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
t = timeUnset
}
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
o.size = rest.ParseSizeFromHeaders(res.Header)
// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
@@ -653,6 +649,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
if err = o.decodeMetadata(ctx, res); err != nil {
return nil, fmt.Errorf("decodeMetadata failed: %w", err)
}
return res.Body, nil
}


@@ -3,7 +3,7 @@ package http
import (
"context"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"net/url"
@@ -33,20 +33,21 @@ var (
lineEndSize = 1
)
// prepareServer the test server and return a function to tidy it up afterwards
func prepareServer(t *testing.T) (configmap.Simple, func()) {
// prepareServer prepares the test server and shuts it down automatically
// when the test completes.
func prepareServer(t *testing.T) configmap.Simple {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// verify the file path is correct, and also check which line endings
// are used to get sizes right ("\n" except on Windows, but even there
// we may have "\n" or "\r\n" depending on git crlf setting)
fileList, err := ioutil.ReadDir(filesPath)
fileList, err := os.ReadDir(filesPath)
require.NoError(t, err)
require.Greater(t, len(fileList), 0)
for _, file := range fileList {
if !file.IsDir() {
data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
data, _ := os.ReadFile(filepath.Join(filesPath, file.Name()))
if strings.HasSuffix(string(data), "\r\n") {
lineEndSize = 2
}
@@ -78,20 +79,21 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
"url": ts.URL,
"headers": strings.Join(headers, ","),
}
t.Cleanup(ts.Close)
// return a function to tidy up
return m, ts.Close
return m
}
// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
m, tidy := prepareServer(t)
// prepare prepares the test server and shuts it down automatically
// when the test completes.
func prepare(t *testing.T) fs.Fs {
m := prepareServer(t)
// Instantiate it
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
return f, tidy
return f
}
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
@@ -134,22 +136,19 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
}
func TestListRoot(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
f := prepare(t)
testListRoot(t, f, false)
}
func TestListRootNoSlash(t *testing.T) {
f, tidy := prepare(t)
f := prepare(t)
f.(*Fs).opt.NoSlash = true
defer tidy()
testListRoot(t, f, true)
}
func TestListSubDir(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
f := prepare(t)
entries, err := f.List(context.Background(), "three")
require.NoError(t, err)
@@ -166,8 +165,7 @@ func TestListSubDir(t *testing.T) {
}
func TestNewObject(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
f := prepare(t)
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
@@ -194,36 +192,69 @@ func TestNewObject(t *testing.T) {
}
func TestOpen(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
m := prepareServer(t)
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
for _, head := range []bool{false, true} {
if !head {
m.Set("no_head", "true")
}
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
// Test normal read
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
for _, rangeRead := range []bool{false, true} {
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
if !head {
// Test mod time is still indeterminate
tObj := o.ModTime(context.Background())
assert.Equal(t, time.Duration(0), time.Unix(0, 0).Sub(tObj))
// Test file size is still indeterminate
assert.Equal(t, int64(-1), o.Size())
}
var data []byte
if !rangeRead {
// Test normal read
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}
} else {
// Test with range request
fd, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
tFile := fi.ModTime()
// Test the time is always correct on the object after file open
tObj := o.ModTime(context.Background())
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
if !rangeRead {
// Test the file size
assert.Equal(t, int64(len(data)), o.Size())
}
}
}
// Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
func TestMimeType(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
f := prepare(t)
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
@@ -234,8 +265,7 @@ func TestMimeType(t *testing.T) {
}
func TestIsAFileRoot(t *testing.T) {
m, tidy := prepareServer(t)
defer tidy()
m := prepareServer(t)
f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
@@ -244,8 +274,7 @@ func TestIsAFileRoot(t *testing.T) {
}
func TestIsAFileSubDir(t *testing.T) {
m, tidy := prepareServer(t)
defer tidy()
m := prepareServer(t)
f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)


@@ -1,62 +0,0 @@
package hubic
import (
"context"
"net/http"
"time"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs"
)
// auth is an authenticator for swift
type auth struct {
f *Fs
}
// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
return &auth{
f: f,
}
}
// Request constructs an http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
}
return nil, err
}
// Response parses the result of an http request
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
return nil
}
// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
return a.f.credentials.Endpoint
}
// The access token
func (a *auth) Token() string {
return a.f.credentials.Token
}
// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
return ""
}
// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)


@@ -1,200 +0,0 @@
// Package hubic provides an interface to the Hubic object storage
// system.
package hubic
// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisited after some actual experience.
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
swiftLib "github.com/ncw/swift/v2"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
)
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"credentials.r", // Read OpenStack credentials
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.hubic.com/oauth/auth/",
TokenURL: "https://api.hubic.com/oauth/token/",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})
}
// credentials is the JSON returned from the Hubic API to read the
// OpenStack credentials
type credentials struct {
Token string `json:"token"` // OpenStack token
Endpoint string `json:"endpoint"` // OpenStack endpoint
Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
}
// Fs represents a remote hubic
type Fs struct {
fs.Fs // wrapped Fs
features *fs.Features // optional features
client *http.Client // client for oauth api
credentials credentials // returned from the Hubic API
expires time.Time // time credentials expire
}
// Object describes a swift object
type Object struct {
*swift.Object
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Object.String()
}
// ------------------------------------------------------------
// String converts this Fs to a string
func (f *Fs) String() string {
if f.Fs == nil {
return "Hubic"
}
return fmt.Sprintf("Hubic %s", f.Fs.String())
}
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
resp, err := f.client.Do(req)
if err != nil {
return err
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
}
decoder := json.NewDecoder(resp.Body)
var result credentials
err = decoder.Decode(&result)
if err != nil {
return err
}
// fs.Debugf(f, "Got credentials %+v", result)
if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
return errors.New("couldn't read token, result and expired from credentials")
}
f.credentials = result
expires, err := time.Parse(time.RFC3339, result.Expires)
if err != nil {
return err
}
f.expires = expires
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, f.expires.Sub(time.Now()))
return nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, fmt.Errorf("failed to configure Hubic: %w", err)
}
f := &Fs{
client: client,
}
// Make the swift Connection
ci := fs.GetConfig(ctx)
c := &swiftLib.Connection{
Auth: newAuth(f),
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
}
err = c.Authenticate(ctx)
if err != nil {
return nil, fmt.Errorf("error authenticating swift connection: %w", err)
}
// Parse config into swift.Options struct
opt := new(swift.Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.Fs = swiftFs
f.features = f.Fs.Features().Wrap(f)
return f, err
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
)


@@ -1,19 +0,0 @@
// Test Hubic filesystem interface
package hubic_test
import (
"testing"
"github.com/rclone/rclone/backend/hubic"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
SkipFsCheckWrap: true,
SkipObjectCheckWrap: true,
})
}


@@ -28,6 +28,7 @@ import (
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest"
)
@@ -37,6 +38,100 @@ func init() {
Name: "internetarchive",
Description: "Internet Archive",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
System: map[string]fs.MetadataHelp{
"name": {
Help: "Full file path, without the bucket part",
Type: "filename",
Example: "backend/internetarchive/internetarchive.go",
ReadOnly: true,
},
"source": {
Help: "The source of the file",
Type: "string",
Example: "original",
ReadOnly: true,
},
"mtime": {
Help: "Time of last modification, managed by Rclone",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z",
ReadOnly: true,
},
"size": {
Help: "File size in bytes",
Type: "decimal number",
Example: "123456",
ReadOnly: true,
},
"md5": {
Help: "MD5 hash calculated by Internet Archive",
Type: "string",
Example: "01234567012345670123456701234567",
ReadOnly: true,
},
"crc32": {
Help: "CRC32 calculated by Internet Archive",
Type: "string",
Example: "01234567",
ReadOnly: true,
},
"sha1": {
Help: "SHA1 hash calculated by Internet Archive",
Type: "string",
Example: "0123456701234567012345670123456701234567",
ReadOnly: true,
},
"format": {
Help: "Name of format identified by Internet Archive",
Type: "string",
Example: "Comma-Separated Values",
ReadOnly: true,
},
"old_version": {
Help: "Whether the file was replaced and moved by keep-old-version flag",
Type: "boolean",
Example: "true",
ReadOnly: true,
},
"viruscheck": {
Help: "The last time viruscheck process was run for the file (?)",
Type: "unixtime",
Example: "1654191352",
ReadOnly: true,
},
"summation": {
Help: "Check https://forum.rclone.org/t/31922 for how it is used",
Type: "string",
Example: "md5",
ReadOnly: true,
},
"rclone-ia-mtime": {
Help: "Time of last modification, managed by Internet Archive",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z",
},
"rclone-mtime": {
Help: "Time of last modification, managed by Rclone",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z",
},
"rclone-update-track": {
Help: "Random value used by Rclone for tracking changes inside Internet Archive",
Type: "string",
Example: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
},
},
Help: `Metadata fields provided by Internet Archive.
If there are multiple values for a key, only the first one is returned.
This is a limitation of Rclone, which supports only one value per key.
Owners are able to add custom keys; the metadata feature returns all keys, including custom ones.
`,
},
Options: []fs.Option{{
Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
@@ -89,6 +184,14 @@ Only enable if you need to be guaranteed to be reflected after write operations.
// maximum size of an item. this is constant across all items
const iaItemMaxSize int64 = 1099511627776
// metadata keys that are not writeable
var roMetadataKey = map[string]interface{}{
// do not add mtime here, it's a documented exception
"name": nil, "source": nil, "size": nil, "md5": nil,
"crc32": nil, "sha1": nil, "format": nil, "old_version": nil,
"viruscheck": nil, "summation": nil,
}
// Options defines the configuration for this backend
type Options struct {
AccessKeyID string `config:"access_key_id"`
@@ -121,26 +224,37 @@ type Object struct {
md5 string // md5 hash of the file presented by the server
sha1 string // sha1 hash of the file presented by the server
crc32 string // crc32 of the file presented by the server
rawData json.RawMessage
}
// IAFile reprensents a subset of object in MetadataResponse.Files
// IAFile represents a subset of object in MetadataResponse.Files
type IAFile struct {
Name string `json:"name"`
// Source string `json:"source"`
Mtime string `json:"mtime"`
RcloneMtime json.RawMessage `json:"rclone-mtime"`
UpdateTrack json.RawMessage `json:"rclone-update-track"`
Size string `json:"size"`
Md5 string `json:"md5"`
Crc32 string `json:"crc32"`
Sha1 string `json:"sha1"`
Summation string `json:"summation"`
rawData json.RawMessage
}
// MetadataResponse reprensents subset of the JSON object returned by (frontend)/metadata/
// MetadataResponse represents subset of the JSON object returned by (frontend)/metadata/
type MetadataResponse struct {
Files []IAFile `json:"files"`
ItemSize int64 `json:"item_size"`
}
// MetadataResponseRaw is the form of MetadataResponse to deal with metadata
type MetadataResponseRaw struct {
Files []json.RawMessage `json:"files"`
ItemSize int64 `json:"item_size"`
}
// ModMetadataResponse represents response for amending metadata
type ModMetadataResponse struct {
// https://archive.org/services/docs/api/md-write.html#example
@@ -224,7 +338,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
f.setRoot(root)
f.features = (&fs.Features{
BucketBased: true,
BucketBased: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f)
f.srv = rest.NewClient(fshttp.NewClient(ctx))
@@ -294,7 +411,7 @@ func (o *Object) Storable() bool {
return true
}
// SetModTime is not supported
// SetModTime sets modTime on a particular file
func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
bucket, reqDir := o.split()
if bucket == "" {
@@ -305,18 +422,17 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
}
// https://archive.org/services/docs/api/md-write.html
var patch = []interface{}{
// the following code might be useful for modifying metadata of an uploaded file
patch := []map[string]string{
// we should drop it first to clear all rclone-provided mtimes
struct {
Op string `json:"op"`
Path string `json:"path"`
}{"remove", "/rclone-mtime"},
struct {
Op string `json:"op"`
Path string `json:"path"`
Value string `json:"value"`
}{"add", "/rclone-mtime", t.Format(time.RFC3339Nano)},
}
{
"op": "remove",
"path": "/rclone-mtime",
}, {
"op": "add",
"path": "/rclone-mtime",
"value": t.Format(time.RFC3339Nano),
}}
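// For illustration (editor's note), the marshalled patch sent to the
// metadata endpoint looks like:
// [{"op":"remove","path":"/rclone-mtime"},{"op":"add","path":"/rclone-mtime","value":"2006-01-02T15:04:05.999999999Z"}]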
res, err := json.Marshal(patch)
if err != nil {
return err
@@ -456,14 +572,14 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return "", err
}
bucket, bucketPath := f.split(remote)
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, bucketPath), nil
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -483,6 +599,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
return nil, fs.ErrorCantCopy
}
updateTracker := random.String(32)
headers := map[string]string{
"x-archive-auto-make-bucket": "1",
"x-archive-queue-derive": "0",
@@ -494,7 +611,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
"x-archive-filemeta-crc32": srcObj.crc32,
"x-archive-filemeta-size": fmt.Sprint(srcObj.size),
// add this too for sure
"x-archive-filemeta-rclone-mtime": srcObj.modTime.Format(time.RFC3339Nano),
"x-archive-filemeta-rclone-mtime": srcObj.modTime.Format(time.RFC3339Nano),
"x-archive-filemeta-rclone-update-track": updateTracker,
}
// make a PUT request at (IAS3)/:item/:path without body
@@ -515,7 +633,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
// we can't update/find metadata here as IA will also
// queue server-side copy as well as upload/delete.
return f.waitFileUpload(ctx, trimPathPrefix(path.Join(dstBucket, dstPath), f.root, f.opt.Enc), f.getHashes(ctx, src), srcObj.size)
return f.waitFileUpload(ctx, trimPathPrefix(path.Join(dstBucket, dstPath), f.root, f.opt.Enc), updateTracker, srcObj.size)
}
// ListR lists the objects and directories of the Fs starting
@@ -642,7 +760,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// make a GET request to (frontend)/download/:item/:path
opts := rest.Opts{
Method: "GET",
Path: path.Join("/download/", o.fs.root, o.fs.opt.Enc.FromStandardPath(o.remote)),
Path: path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
Options: optionsFixed,
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -660,12 +778,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
bucket, bucketPath := o.split()
modTime := src.ModTime(ctx)
size := src.Size()
updateTracker := random.String(32)
// Set the mtime in the metadata
// internetarchive backend builds at header level as IAS3 has extension outside X-Amz-
headers := map[string]string{
// https://github.com/jjjake/internetarchive/blob/2456376533251df9d05e0a14d796ec1ced4959f5/internetarchive/iarequest.py#L158
"x-amz-filemeta-rclone-mtime": modTime.Format(time.RFC3339Nano),
"x-amz-filemeta-rclone-mtime": modTime.Format(time.RFC3339Nano),
"x-amz-filemeta-rclone-update-track": updateTracker,
// we add some more headers for intuitive actions
"x-amz-auto-make-bucket": "1", // create an item if does not exist, do nothing if already
@@ -679,6 +799,23 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
headers["Content-Length"] = fmt.Sprintf("%d", size)
headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
}
var mdata fs.Metadata
mdata, err = fs.GetMetadataOptions(ctx, src, options)
if err == nil && mdata != nil {
for mk, mv := range mdata {
mk = strings.ToLower(mk)
if strings.HasPrefix(mk, "rclone-") {
fs.LogPrintf(fs.LogLevelWarning, o, "reserved metadata key %s is about to be set", mk)
} else if _, ok := roMetadataKey[mk]; ok {
fs.LogPrintf(fs.LogLevelWarning, o, "setting or modifying read-only key %s is requested, skipping", mk)
continue
} else if mk == "mtime" {
// redirect to make it work
mk = "rclone-mtime"
}
headers[fmt.Sprintf("x-amz-filemeta-%s", mk)] = mv
}
}
// read the md5sum if available
var md5sumHex string
@@ -712,7 +849,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// or we have to wait for finish? (needs polling (frontend)/metadata/:item or scraping (frontend)/history/:item)
var newObj *Object
if err == nil {
newObj, err = o.fs.waitFileUpload(ctx, o.remote, o.fs.getHashes(ctx, src), size)
newObj, err = o.fs.waitFileUpload(ctx, o.remote, updateTracker, size)
} else {
newObj = &Object{}
}
@@ -756,6 +893,34 @@ func (o *Object) String() string {
return o.remote
}
// Metadata returns all file metadata provided by Internet Archive
func (o *Object) Metadata(ctx context.Context) (m fs.Metadata, err error) {
if o.rawData == nil {
return nil, nil
}
raw := make(map[string]json.RawMessage)
err = json.Unmarshal(o.rawData, &raw)
if err != nil {
// fatal: json parsing failed
return
}
for k, v := range raw {
items, err := listOrString(v)
if len(items) == 0 || err != nil {
// skip: an entry failed to parse
continue
}
m.Set(k, items[0])
}
// move the old mtime to another key
if v, ok := m["mtime"]; ok {
m["rclone-ia-mtime"] = v
}
// overwrite with a correct mtime
m["mtime"] = o.modTime.Format(time.RFC3339Nano)
return
}
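// Example of a returned metadata map (editor's illustration, values are
// hypothetical and mirror the examples documented in MetadataInfo above):
//
//	fs.Metadata{
//		"name":   "backend/internetarchive/internetarchive.go",
//		"source": "original",
//		"size":   "123456",
//		"md5":    "01234567012345670123456701234567",
//		"mtime":  "2006-01-02T15:04:05.999999999Z",
//	}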
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
if resp != nil {
for _, e := range retryErrorCodes {
@@ -782,19 +947,7 @@ func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
func (f *Fs) getHashes(ctx context.Context, src fs.ObjectInfo) map[hash.Type]string {
hashMap := map[hash.Type]string{}
for _, ty := range f.Hashes().Array() {
sum, err := src.Hash(ctx, ty)
if err != nil || sum == "" {
continue
}
hashMap[ty] = sum
}
return hashMap
}
func (f *Fs) requestMetadata(ctx context.Context, bucket string) (result MetadataResponse, err error) {
func (f *Fs) requestMetadata(ctx context.Context, bucket string) (result *MetadataResponse, err error) {
var resp *http.Response
// make a GET request to (frontend)/metadata/:item/
opts := rest.Opts{
@@ -802,12 +955,15 @@ func (f *Fs) requestMetadata(ctx context.Context, bucket string) (result Metadat
Path: path.Join("/metadata/", bucket),
}
var temp MetadataResponseRaw
err = f.pacer.Call(func() (bool, error) {
resp, err = f.front.CallJSON(ctx, &opts, nil, &result)
resp, err = f.front.CallJSON(ctx, &opts, nil, &temp)
return f.shouldRetry(resp, err)
})
return result, err
if err != nil {
return
}
return temp.unraw()
}
// list up all files/directories without any filters
@@ -852,7 +1008,7 @@ func (f *Fs) listAllUnconstrained(ctx context.Context, bucket string) (entries f
return entries, nil
}
func (f *Fs) waitFileUpload(ctx context.Context, reqPath string, newHashes map[hash.Type]string, newSize int64) (ret *Object, err error) {
func (f *Fs) waitFileUpload(ctx context.Context, reqPath, tracker string, newSize int64) (ret *Object, err error) {
bucket, bucketPath := f.split(reqPath)
ret = &Object{
@@ -869,6 +1025,10 @@ func (f *Fs) waitFileUpload(ctx context.Context, reqPath string, newHashes map[h
ret2, ok := ret2.(*Object)
if ok {
ret = ret2
ret.crc32 = ""
ret.md5 = ""
ret.sha1 = ""
ret.size = -1
}
}
return ret, nil
@@ -881,9 +1041,6 @@ func (f *Fs) waitFileUpload(ctx context.Context, reqPath string, newHashes map[h
go func() {
isFirstTime := true
existed := false
oldMtime := ""
oldCrc32 := ""
unreliablePassCount := 0
for {
if !isFirstTime {
// depending on the queue, it takes time
@@ -908,10 +1065,6 @@ func (f *Fs) waitFileUpload(ctx context.Context, reqPath string, newHashes map[h
if isFirstTime {
isFirstTime = false
existed = iaFile != nil
if iaFile != nil {
oldMtime = iaFile.Mtime
oldCrc32 = iaFile.Crc32
}
}
if iaFile == nil {
continue
@@ -925,38 +1078,20 @@ func (f *Fs) waitFileUpload(ctx context.Context, reqPath string, newHashes map[h
return
}
hashMatched := true
for tt, sum := range newHashes {
if tt == hash.MD5 && !hash.Equals(iaFile.Md5, sum) {
hashMatched = false
break
}
if tt == hash.SHA1 && !hash.Equals(iaFile.Sha1, sum) {
hashMatched = false
break
}
if tt == hash.CRC32 && !hash.Equals(iaFile.Crc32, sum) {
hashMatched = false
fileTrackers, _ := listOrString(iaFile.UpdateTrack)
trackerMatch := false
for _, v := range fileTrackers {
if v == tracker {
trackerMatch = true
break
}
}
if !hashMatched {
if !trackerMatch {
continue
}
if !compareSize(parseSize(iaFile.Size), newSize) {
continue
}
if hash.Equals(oldCrc32, iaFile.Crc32) && unreliablePassCount < 60 {
// the following two are based on a sort of "bad" assumption;
// what if the file is updated immediately, before polling?
// by limiting hits of these tests, avoid infinite loop
unreliablePassCount++
continue
}
if hash.Equals(iaFile.Mtime, oldMtime) && unreliablePassCount < 60 {
unreliablePassCount++
continue
}
// voila!
retC <- struct {
@@ -1017,15 +1152,21 @@ func (f *Fs) waitDelete(ctx context.Context, bucket, bucketPath string) (err err
}
func makeValidObject(f *Fs, remote string, file IAFile, mtime time.Time, size int64) *Object {
return &Object{
ret := &Object{
fs: f,
remote: remote,
modTime: mtime,
size: size,
md5: file.Md5,
crc32: file.Crc32,
sha1: file.Sha1,
rawData: file.rawData,
}
// hashes from _files.xml (where summation != "") are different from the ones in other files
// https://forum.rclone.org/t/internet-archive-md5-tag-in-id-files-xml-interpreted-incorrectly/31922
if file.Summation == "" {
ret.md5 = file.Md5
ret.crc32 = file.Crc32
ret.sha1 = file.Sha1
}
return ret
}
func makeValidObject2(f *Fs, file IAFile, bucket string) *Object {
@@ -1036,20 +1177,24 @@ func makeValidObject2(f *Fs, file IAFile, bucket string) *Object {
return makeValidObject(f, trimPathPrefix(path.Join(bucket, file.Name), f.root, f.opt.Enc), file, mtimeTime, size)
}
func (file IAFile) parseMtime() (mtime time.Time) {
// method 1: use metadata added by rclone
var rmArray []string
func listOrString(jm json.RawMessage) (rmArray []string, err error) {
// rclone-metadata can be an array or string
// try to deserialize it as array first
err := json.Unmarshal(file.RcloneMtime, &rmArray)
err = json.Unmarshal(jm, &rmArray)
if err != nil {
// if not, it's a string
dst := new(string)
err = json.Unmarshal(file.RcloneMtime, dst)
err = json.Unmarshal(jm, dst)
if err == nil {
rmArray = []string{*dst}
}
}
return
}
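// Illustrative example (hypothetical values, not from this diff): listOrString
// accepts the rclone-metadata value in either JSON form, e.g.
//
//	listOrString(json.RawMessage(`["2011-12-13T14:15:16Z"]`)) // -> []string{"2011-12-13T14:15:16Z"}, nil
//	listOrString(json.RawMessage(`"2011-12-13T14:15:16Z"`))   // -> []string{"2011-12-13T14:15:16Z"}, nil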
func (file IAFile) parseMtime() (mtime time.Time) {
// method 1: use metadata added by rclone
rmArray, err := listOrString(file.RcloneMtime)
// let's take the first value we can deserialize
for _, value := range rmArray {
mtime, err = time.Parse(time.RFC3339Nano, value)
@@ -1068,6 +1213,23 @@ func (file IAFile) parseMtime() (mtime time.Time) {
return mtime
}
func (mrr *MetadataResponseRaw) unraw() (_ *MetadataResponse, err error) {
var files []IAFile
for _, raw := range mrr.Files {
var parsed IAFile
err = json.Unmarshal(raw, &parsed)
if err != nil {
return nil, err
}
parsed.rawData = raw
files = append(files, parsed)
}
return &MetadataResponse{
Files: files,
ItemSize: mrr.ItemSize,
}, nil
}
func compareSize(a, b int64) bool {
if a < 0 || b < 0 {
// we won't compare if any of them is not known
@@ -1111,7 +1273,7 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
}
// mimicks urllib.parse.quote() on Python; exclude / from url.PathEscape
// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
func quotePath(s string) string {
seg := strings.Split(s, "/")
newValues := []string{}
@@ -1129,4 +1291,5 @@ var (
_ fs.PublicLinker = &Fs{}
_ fs.Abouter = &Fs{}
_ fs.Object = &Object{}
_ fs.Metadataer = &Object{}
)

View File

@@ -1,3 +1,4 @@
// Package api provides types used by the Jottacloud API.
package api
import (

View File

@@ -1,3 +1,4 @@
// Package jottacloud provides an interface to the Jottacloud storage system.
package jottacloud
import (
@@ -11,7 +12,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
@@ -46,9 +46,9 @@ const (
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://jfs.jottacloud.com/jfs/"
jfsURL = "https://jfs.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
wwwURL = "https://www.jottacloud.com/"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
configMountpoint = "mountpoint"
@@ -127,7 +127,7 @@ func init() {
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
switch config.State {
case "":
return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type.`, []fs.OptionExample{{
return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Select authentication type.`, []fs.OptionExample{{
Value: "standard",
Help: "Standard authentication.\nUse this if you're a normal Jottacloud user.",
}, {
@@ -145,7 +145,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
return fs.ConfigGoto(config.Result)
case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
m.Set("configVersion", fmt.Sprint(configVersion))
return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\n\nGenerate here: https://www.jottacloud.com/web/secure")
return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\nGenerate here: https://www.jottacloud.com/web/secure")
case "standard_token":
loginToken := config.Result
m.Set(configClientID, defaultClientID)
@@ -191,7 +191,7 @@ machines.`)
m.Set("auth_code", "")
return fs.ConfigGoto("legacy_do_auth")
case "legacy_auth_code":
authCode := strings.Replace(config.Result, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
authCode := strings.ReplaceAll(config.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
m.Set("auth_code", authCode)
return fs.ConfigGoto("legacy_do_auth")
case "legacy_do_auth":
@@ -262,7 +262,11 @@ machines.`)
},
})
case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
Choosing no, the default, will let you access the storage used for the archive
section of the official Jottacloud client. If you instead want to access the
sync or the backup section, for example, you must choose yes.`)
case "choose_device_query":
if config.Result != "true" {
m.Set(configDevice, "")
@@ -273,43 +277,139 @@ machines.`)
if err != nil {
return nil, err
}
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
}
m.Set(configUsername, cust.Username)
acc, err := getDriveInfo(ctx, srv, cust.Username)
acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
if err != nil {
return nil, err
}
return fs.ConfigChoose("choose_device_result", "config_device", `Please select the device to use. Normally this will be Jotta`, len(acc.Devices), func(i int) (string, string) {
return acc.Devices[i].Name, ""
deviceNames := make([]string, len(acc.Devices))
for i, dev := range acc.Devices {
if i > 0 && dev.Name == defaultDevice {
// Insert the special Jotta device as first entry, making it the default choice.
copy(deviceNames[1:i+1], deviceNames[0:i])
deviceNames[0] = dev.Name
} else {
deviceNames[i] = dev.Name
}
}
help := fmt.Sprintf(`The device to use. In standard setup the built-in %s device is used,
which contains predefined mountpoints for archive, sync etc. All other devices
are treated as backup devices by the official Jottacloud client. You may create
a new one by entering a unique name.`, defaultDevice)
return fs.ConfigChoose("choose_device_result", "config_device", help, len(deviceNames), func(i int) (string, string) {
return deviceNames[i], ""
})
case "choose_device_result":
device := config.Result
m.Set(configDevice, device)
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
return nil, err
}
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
username, _ := m.Get(configUsername)
dev, err := getDeviceInfo(ctx, srv, path.Join(username, device))
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
}
return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", `Please select the mountpoint to use. Normally this will be Archive.`, len(dev.MountPoints), func(i int) (string, string) {
acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
if err != nil {
return nil, err
}
isNew := true
for _, dev := range acc.Devices {
if strings.EqualFold(dev.Name, device) { // If device name exists with different casing we prefer the existing (not sure if and how the api handles the opposite)
device = dev.Name // Prefer same casing as existing, e.g. if user entered "jotta" we use the standard casing "Jotta" instead
isNew = false
break
}
}
var dev *api.JottaDevice
if isNew {
fs.Debugf(nil, "Creating new device: %s", device)
dev, err = createDevice(ctx, jfsSrv, path.Join(cust.Username, device))
if err != nil {
return nil, err
}
}
m.Set(configDevice, device)
if !isNew {
dev, err = getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
if err != nil {
return nil, err
}
}
var help string
if device == defaultDevice {
// With built-in Jotta device the mountpoint choice is exclusive,
// we do not want to risk any problems by creating new mountpoints on it.
help = fmt.Sprintf(`The mountpoint to use on the built-in device %s.
The standard setup is to use the %s mountpoint. Most other mountpoints
have very limited support in rclone and should generally be avoided.`, defaultDevice, defaultMountpoint)
return fs.ConfigChooseExclusive("choose_device_mountpoint", "config_mountpoint", help, len(dev.MountPoints), func(i int) (string, string) {
return dev.MountPoints[i].Name, ""
})
}
help = fmt.Sprintf(`The mountpoint to use on the non-standard device %s.
You may create a new one by entering a unique name.`, device)
return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", help, len(dev.MountPoints), func(i int) (string, string) {
return dev.MountPoints[i].Name, ""
})
case "choose_device_mountpoint":
mountpoint := config.Result
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
return nil, err
}
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
}
device, _ := m.Get(configDevice)
dev, err := getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
if err != nil {
return nil, err
}
isNew := true
for _, mnt := range dev.MountPoints {
if strings.EqualFold(mnt.Name, mountpoint) {
mountpoint = mnt.Name
isNew = false
break
}
}
if isNew {
if device == defaultDevice {
return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
}
fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
if err != nil {
return nil, err
}
}
m.Set(configMountpoint, mountpoint)
return fs.ConfigGoto("end")
case "end":
// All the config flows end up here in case we need to carry on with something
@@ -332,16 +432,17 @@ type Options struct {
// Fs represents a remote jottacloud
type Fs struct {
name string
root string
user string
opt Options
features *fs.Features
endpointURL string
srv *rest.Client
apiSrv *rest.Client
pacer *fs.Pacer
tokenRenewer *oauthutil.Renew // renew the token on expiry
name string
root string
user string
opt Options
features *fs.Features
fileEndpoint string
allocateEndpoint string
jfsSrv *rest.Client
apiSrv *rest.Client
pacer *fs.Pacer
tokenRenewer *oauthutil.Renew // renew the token on expiry
}
// Object describes a jottacloud object
@@ -588,15 +689,47 @@ func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *ap
return info, nil
}
// setEndpointURL generates the API endpoint URL
func (f *Fs) setEndpointURL() {
// createDevice makes a device
func createDevice(ctx context.Context, srv *rest.Client, path string) (info *api.JottaDevice, err error) {
opts := rest.Opts{
Method: "POST",
Path: urlPathEscape(path),
Parameters: url.Values{},
}
opts.Parameters.Set("type", "WORKSTATION")
_, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil {
return nil, fmt.Errorf("couldn't create device: %w", err)
}
return info, nil
}
// createMountPoint makes a mount point
func createMountPoint(ctx context.Context, srv *rest.Client, path string) (info *api.JottaMountPoint, err error) {
opts := rest.Opts{
Method: "POST",
Path: urlPathEscape(path),
}
_, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil {
return nil, fmt.Errorf("couldn't create mountpoint: %w", err)
}
return info, nil
}
// setEndpoints generates the API endpoints
func (f *Fs) setEndpoints() {
if f.opt.Device == "" {
f.opt.Device = defaultDevice
}
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
f.fileEndpoint = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
f.allocateEndpoint = path.Join("/jfs", f.opt.Device, f.opt.Mountpoint)
}
// readMetaDataForPath reads the metadata from the path
@@ -608,7 +741,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
var result api.JottaFile
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
@@ -649,17 +782,34 @@ func errorHandler(resp *http.Response) error {
// Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved
func urlPathEscape(in string) string {
return strings.Replace(rest.URLPathEscape(in), "+", "%2B", -1)
return strings.ReplaceAll(rest.URLPathEscape(in), "+", "%2B")
}
// filePathRaw returns an unescaped file path (f.root, file)
func (f *Fs) filePathRaw(file string) string {
return path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
// Optionally made absolute by prefixing with "/", typically required when used
// as request parameter instead of the path (which is relative to some root url).
func (f *Fs) filePathRaw(file string, absolute bool) string {
prefix := ""
if absolute {
prefix = "/"
}
return path.Join(prefix, f.fileEndpoint, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
}
// filePath returns an escaped file path (f.root, file)
func (f *Fs) filePath(file string) string {
return urlPathEscape(f.filePathRaw(file))
return urlPathEscape(f.filePathRaw(file, false))
}
// allocatePathRaw returns an unescaped allocate file path (f.root, file)
// Optionally made absolute by prefixing with "/", typically required when used
// as request parameter instead of the path (which is relative to some root url).
func (f *Fs) allocatePathRaw(file string, absolute bool) string {
prefix := ""
if absolute {
prefix = "/"
}
return path.Join(prefix, f.allocateEndpoint, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
}
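// Illustrative example (hypothetical account, default device/mountpoint, empty
// root, encoding ignored): after setEndpoints has run,
//
//	f.fileEndpoint     == "alice/Jotta/Archive"
//	f.allocateEndpoint == "/jfs/Jotta/Archive"
//	f.filePathRaw("dir/file.txt", false)    == "alice/Jotta/Archive/dir/file.txt"
//	f.filePathRaw("dir/file.txt", true)     == "/alice/Jotta/Archive/dir/file.txt"
//	f.allocatePathRaw("dir/file.txt", true) == "/jfs/Jotta/Archive/dir/file.txt"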
// Jottacloud requires the grant_type 'refresh_token' string
@@ -671,7 +821,7 @@ func (f *Fs) filePath(file string) string {
func grantTypeFilter(req *http.Request) {
if legacyTokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
refreshBody, err := io.ReadAll(req.Body)
if err != nil {
return
}
@@ -681,7 +831,7 @@ func grantTypeFilter(req *http.Request) {
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close())
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
req.Body = io.NopCloser(bytes.NewReader(refreshBody))
}
}
@@ -692,12 +842,12 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
if ok {
ver, err = strconv.Atoi(version)
if err != nil {
return nil, nil, errors.New("Failed to parse config version")
return nil, nil, errors.New("failed to parse config version")
}
ok = (ver == configVersion) || (ver == legacyConfigVersion)
}
if !ok {
return nil, nil, errors.New("Outdated config - please reconfigure this backend")
return nil, nil, errors.New("outdated config - please reconfigure this backend")
}
baseClient := fshttp.NewClient(ctx)
@@ -743,7 +893,7 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
// Create OAuth Client
oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, nil, fmt.Errorf("Failed to configure Jottacloud oauth client: %w", err)
return nil, nil, fmt.Errorf("failed to configure Jottacloud oauth client: %w", err)
}
return oAuthClient, ts, nil
}
@@ -769,7 +919,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
jfsSrv: rest.NewClient(oAuthClient).SetRoot(jfsURL),
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
@@ -779,7 +929,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ReadMimeType: true,
WriteMimeType: false,
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
f.jfsSrv.SetErrorHandler(errorHandler)
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
f.features.ListR = nil
}
@@ -798,7 +948,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err
}
f.user = cust.Username
f.setEndpointURL()
f.setEndpoints()
if root != "" && !rootIsDir {
// Check to see if the root actually an existing file
@@ -864,7 +1014,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e
opts.Parameters.Set("mkDir", "true")
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &jf)
return shouldRetry(ctx, resp, err)
})
if err != nil {
@@ -893,7 +1043,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var resp *http.Response
var result api.JottaFolder
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
@@ -932,7 +1082,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return entries, nil
}
func parseListRStream(ctx context.Context, r io.Reader, trimPrefix string, filesystem *Fs, callback func(fs.DirEntry) error) error {
func parseListRStream(ctx context.Context, r io.Reader, filesystem *Fs, callback func(fs.DirEntry) error) error {
type stats struct {
Folders int `xml:"folders"`
@@ -968,8 +1118,12 @@ func parseListRStream(ctx context.Context, r io.Reader, trimPrefix string, files
})
}
// liststream paths are /mountpoint/root/path
// so the returned paths should have /mountpoint/root/ trimmed
// as the caller is expecting path.
pathPrefix := filesystem.opt.Enc.FromStandardPath(path.Join("/", filesystem.opt.Mountpoint, filesystem.root))
trimPathPrefix := func(p string) string {
p = strings.TrimPrefix(p, trimPrefix)
p = strings.TrimPrefix(p, pathPrefix)
p = strings.TrimPrefix(p, "/")
return p
}
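// Illustrative example (hypothetical values): with mountpoint "Archive" and
// root "projects", a streamed path "/Archive/projects/docs/a.txt" is trimmed
// to "docs/a.txt", which is the form the callback expects.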
@@ -1026,7 +1180,7 @@ func parseListRStream(ctx context.Context, r io.Reader, trimPrefix string, files
if expected.Folders != actual.Folders ||
expected.Files != actual.Files {
return fmt.Errorf("Invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual)
return fmt.Errorf("invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual)
}
return nil
}
@@ -1047,16 +1201,12 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.jfsSrv.Call(ctx, &opts)
if err != nil {
return shouldRetry(ctx, resp, err)
}
// liststream paths are /mountpoint/root/path
// so the returned paths should have /mountpoint/root/ trimmed
// as the caller is expecting path.
trimPrefix := path.Join("/", f.opt.Mountpoint, f.root)
err = parseListRStream(ctx, resp.Body, trimPrefix, f, func(d fs.DirEntry) error {
err = parseListRStream(ctx, resp.Body, f, func(d fs.DirEntry) error {
if d.Remote() == dir {
return nil
}
@@ -1097,13 +1247,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if f.opt.Device != "Jotta" {
return nil, errors.New("upload not supported for devices other than Jotta")
}
o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size())
return o, o.Update(ctx, in, src, options...)
}
@@ -1113,10 +1260,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
// defer log.Trace(dirPath, "")("")
// chop off trailing / if it exists
if strings.HasSuffix(dirPath, "/") {
dirPath = dirPath[:len(dirPath)-1]
}
parent := path.Dir(dirPath)
parent := path.Dir(strings.TrimSuffix(dirPath, "/"))
if parent == "." {
parent = ""
}
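// The TrimSuffix matters because path.Dir keeps a trailing-slash directory:
// path.Dir("a/b/c/") == "a/b/c", whereas
// path.Dir(strings.TrimSuffix("a/b/c/", "/")) == "a/b", the parent we want.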
@@ -1164,7 +1308,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.jfsSrv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
})
if err != nil {
@@ -1217,7 +1361,7 @@ func (f *Fs) createOrUpdate(ctx context.Context, file string, modTime time.Time,
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
})
@@ -1238,11 +1382,11 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
Parameters: url.Values{},
}
opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, dest))))
opts.Parameters.Set(method, f.filePathRaw(dest, true))
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
})
if err != nil {
@@ -1253,9 +1397,9 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1273,7 +1417,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
// if destination was a trashed file then after a successfull copy the copied file is still in trash (bug in api?)
// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
@@ -1289,9 +1433,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1351,7 +1495,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return fs.ErrorDirExists
}
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.fileEndpoint, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
if err != nil {
return fmt.Errorf("couldn't move directory: %w", err)
@@ -1376,7 +1520,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var resp *http.Response
var result api.JottaFile
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
@@ -1402,19 +1546,19 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return "", errors.New("couldn't create public link - no uri received")
}
if result.PublicSharePath != "" {
webLink := joinPath(baseURL, result.PublicSharePath)
webLink := joinPath(wwwURL, result.PublicSharePath)
fs.Debugf(nil, "Web link: %s", webLink)
} else {
fs.Debugf(nil, "No web link received")
}
directLink := joinPath(baseURL, fmt.Sprintf("opin/io/downloadPublic/%s/%s", f.user, result.PublicURI))
directLink := joinPath(wwwURL, fmt.Sprintf("opin/io/downloadPublic/%s/%s", f.user, result.PublicURI))
fs.Debugf(nil, "Direct link: %s", directLink)
return directLink, nil
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := getDriveInfo(ctx, f.srv, f.user)
info, err := getDriveInfo(ctx, f.jfsSrv, f.user)
if err != nil {
return nil, err
}
@@ -1617,7 +1761,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
opts.Parameters.Set("mode", "bin")
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.jfsSrv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
})
if err != nil {
@@ -1644,7 +1788,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
var tempFile *os.File
// create the cache file
tempFile, err = ioutil.TempFile("", cachePrefix)
tempFile, err = os.CreateTemp("", cachePrefix)
if err != nil {
return
}
@@ -1672,7 +1816,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
} else {
// that's a small file, just read it into memory
var inData []byte
inData, err = ioutil.ReadAll(teeReader)
inData, err = io.ReadAll(teeReader)
if err != nil {
return
}
@@ -1685,7 +1829,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
@@ -1738,7 +1882,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Created: fileDate,
Modified: fileDate,
Md5: md5String,
Path: path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
Path: o.fs.allocatePathRaw(o.remote, true),
}
// send it
@@ -1769,7 +1913,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// copy the already uploaded bytes into the trash :)
var result api.UploadResponse
_, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
_, err = io.CopyN(io.Discard, in, response.ResumePos)
if err != nil {
return err
}
@@ -1808,7 +1952,7 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
}
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
return shouldRetry(ctx, resp, err)
})
}

View File

@@ -1,3 +1,4 @@
// Package koofr provides an interface to the Koofr storage system.
package koofr
import (
@@ -351,9 +352,9 @@ func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff
}
if f.mountID == "" {
if opt.MountID == "" {
return nil, errors.New("Failed to find primary mount")
return nil, errors.New("failed to find primary mount")
}
return nil, errors.New("Failed to find mount " + opt.MountID)
return nil, errors.New("failed to find mount " + opt.MountID)
}
rootFile, err := f.client.FilesInfo(f.mountID, f.opt.Enc.FromStandardPath("/"+f.root))
if err == nil && rootFile.Type != "dir" {
@@ -667,7 +668,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
//
// https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
//
// I am not sure about meaning of "path" parameter; in my expriments
// I am not sure about meaning of "path" parameter; in my experiments
// it is always "%2F", and omitting it or putting any other value
// results in 404.
//

View File

@@ -17,8 +17,12 @@ var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSp
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var available, total, free int64
root, e := syscall.UTF16PtrFromString(f.root)
if e != nil {
return nil, fmt.Errorf("failed to read disk usage: %w", e)
}
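// Note: the previously used syscall.StringToUTF16Ptr panics if the path
// contains a NUL byte; syscall.UTF16PtrFromString returns an error instead,
// which can be wrapped and reported as above.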
_, _, e1 := getFreeDiskSpace.Call(
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
uintptr(unsafe.Pointer(root)),
uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes

View File

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -22,6 +21,7 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
@@ -42,9 +42,22 @@ func init() {
Description: "Local Disk",
NewFs: NewFs,
CommandHelp: commandHelp,
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `Depending on which OS is in use the local backend may return only some
of the system metadata. Setting system metadata is supported on all
OSes but setting user metadata is only supported on linux, freebsd,
netbsd, macOS and Solaris. It is **not** supported on Windows yet
([see pkg/attrs#47](https://github.com/pkg/xattr/issues/47)).
User metadata is stored as extended attributes (which may not be
supported by all file systems) under the "user.*" prefix.
`,
},
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows.",
Default: false,
Advanced: runtime.GOOS != "windows",
Examples: []fs.OptionExample{{
Value: "true",
@@ -110,8 +123,8 @@ routine so this flag shouldn't normally be used.`,
Help: `Don't check to see if the files change during upload.
Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.
are being uploaded and aborts with a message which starts "can't copy -
source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (e.g.
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
@@ -221,15 +234,16 @@ type Options struct {
// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
xattrSupported int32 // whether xattrs are supported (atomic access)
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
@@ -273,12 +287,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
dev: devUnset,
lstat: os.Lstat,
}
if xattrSupported {
f.xattrSupported = 1
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
IsLocal: true,
SlowHash: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat
@@ -423,6 +444,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
filter, useFilter := filter.GetConfig(ctx), filter.GetUseFilter(ctx)
fsDirPath := f.localPath(dir)
_, err = os.Stat(fsDirPath)
if err != nil {
@@ -473,7 +496,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
if fierr != nil {
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
// Don't report errors on any file names that are excluded
if useFilter {
newRemote := f.cleanRemote(dir, name)
if !filter.IncludeRemote(newRemote) {
continue
}
}
err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
@@ -494,6 +524,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
// Quietly skip errors on excluded files and directories
if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
continue
}
if os.IsNotExist(err) || isCircularSymlinkError(err) {
// Skip bad symlinks and circular symlinks
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
@@ -506,6 +540,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
mode = fi.Mode()
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
@@ -610,7 +649,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
precision = time.Second
// Create temporary file and test it
fd, err := ioutil.TempFile("", "rclone")
fd, err := os.CreateTemp("", "rclone")
if err != nil {
// If failed return 1s
// fmt.Println("Failed to create temp file", err)
@@ -679,9 +718,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -903,7 +942,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", fmt.Errorf("hash: failed to open: %w", err)
}
var hashes map[hash.Type]string
hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
hashes, err = hash.StreamTypes(readers.NewContextReader(ctx, in), hash.NewHashSet(r))
closeErr := in.Close()
if err != nil {
return "", fmt.Errorf("hash: failed to read: %w", err)
@@ -937,17 +976,22 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// Set the atime and ltime of the object
func (o *Object) setTimes(atime, mtime time.Time) (err error) {
if o.translatedLink {
err = lChtimes(o.path, atime, mtime)
} else {
err = os.Chtimes(o.path, atime, mtime)
}
return err
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if o.fs.opt.NoSetModTime {
return nil
}
var err error
if o.translatedLink {
err = lChtimes(o.path, modTime, modTime)
} else {
err = os.Chtimes(o.path, modTime, modTime)
}
err := o.setTimes(modTime, modTime)
if err != nil {
return err
}
@@ -1032,7 +1076,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
if err != nil {
return nil, err
}
return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
return readers.NewLimitedReadCloser(io.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
}
// Open an object for read
@@ -1222,6 +1266,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// Fetch and set metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, src, options)
if err != nil {
return fmt.Errorf("failed to read metadata from source object: %w", err)
}
err = o.writeMetadata(meta)
if err != nil {
return fmt.Errorf("failed to set metadata: %w", err)
}
// ReRead info now that we have finished
return o.lstat()
}
@@ -1320,31 +1374,56 @@ func (o *Object) Remove(ctx context.Context) error {
return remove(o.path)
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
metadata, err = o.getXattr()
if err != nil {
return nil, err
}
err = o.readMetadataFromFile(&metadata)
if err != nil {
return nil, err
}
return metadata, nil
}
// Write the metadata on the object
func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
err = o.setXattr(metadata)
if err != nil {
return err
}
err = o.writeMetadataToFile(metadata)
if err != nil {
return err
}
return err
}
func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
} else {
s = filepath.Clean(s)
}
}
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !noUNC {
// Convert to UNC
s = file.UNCPath(s)
}
return s
}
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = enc.FromStandardPath(s)
return s
}
@@ -1359,4 +1438,5 @@ var (
_ fs.Commander = &Fs{}
_ fs.OpenWriterAter = &Fs{}
_ fs.Object = &Object{}
_ fs.Metadataer = &Object{}
)

View File

@@ -3,15 +3,20 @@ package local
import (
"bytes"
"context"
"io/ioutil"
"fmt"
"io"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
@@ -29,7 +34,6 @@ func TestMain(m *testing.M) {
// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now())
@@ -74,7 +78,6 @@ func TestUpdatingCheck(t *testing.T) {
func TestSymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
f := r.Flocal.(*Fs)
dir := f.root
@@ -146,7 +149,7 @@ func TestSymlink(t *testing.T) {
// Check reading the object
in, err := o.Open(ctx)
require.NoError(t, err)
contents, err := ioutil.ReadAll(in)
contents, err := io.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt", string(contents))
require.NoError(t, in.Close())
@@ -154,7 +157,7 @@ func TestSymlink(t *testing.T) {
// Check reading the object with range
in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
require.NoError(t, err)
contents, err = ioutil.ReadAll(in)
contents, err = io.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt"[2:5+1], string(contents))
require.NoError(t, in.Close())
@@ -173,7 +176,6 @@ func TestSymlinkError(t *testing.T) {
func TestHashOnUpdate(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
@@ -188,7 +190,7 @@ func TestHashOnUpdate(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
// Reupload it with diferent contents but same size and timestamp
// Reupload it with different contents but same size and timestamp
var b = bytes.NewBufferString("CONTENT")
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
err = o.Update(ctx, b, src)
@@ -204,7 +206,6 @@ func TestHashOnUpdate(t *testing.T) {
func TestHashOnDelete(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
@@ -229,3 +230,239 @@ func TestHashOnDelete(t *testing.T) {
_, err = o.Hash(ctx, hash.MD5)
require.Error(t, err)
}
func TestMetadata(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
const filePath = "metafile.txt"
when := time.Now()
const dayLength = len("2001-01-01")
whenRFC := when.Format(time.RFC3339Nano)
r.WriteFile(filePath, "metadata file contents", when)
f := r.Flocal.(*Fs)
// Get the object
obj, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
o := obj.(*Object)
features := f.Features()
var hasXID, hasAtime, hasBtime bool
switch runtime.GOOS {
case "darwin", "freebsd", "netbsd", "linux":
hasXID, hasAtime, hasBtime = true, true, true
case "openbsd", "solaris":
hasXID, hasAtime = true, true
case "windows":
hasAtime, hasBtime = true, true
case "plan9", "js":
// nada
default:
t.Errorf("No test cases for OS %q", runtime.GOOS)
}
assert.True(t, features.ReadMetadata)
assert.True(t, features.WriteMetadata)
assert.Equal(t, xattrSupported, features.UserMetadata)
t.Run("Xattr", func(t *testing.T) {
if !xattrSupported {
t.Skip()
}
m, err := o.getXattr()
require.NoError(t, err)
assert.Nil(t, m)
inM := fs.Metadata{
"potato": "chips",
"cabbage": "soup",
}
err = o.setXattr(inM)
require.NoError(t, err)
m, err = o.getXattr()
require.NoError(t, err)
assert.NotNil(t, m)
assert.Equal(t, inM, m)
})
checkTime := func(m fs.Metadata, key string, when time.Time) {
mt, ok := o.parseMetadataTime(m, key)
assert.True(t, ok)
dt := mt.Sub(when)
precision := time.Second
assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
}
checkInt := func(m fs.Metadata, key string, base int) int {
value, ok := o.parseMetadataInt(m, key, base)
assert.True(t, ok)
return value
}
t.Run("Read", func(t *testing.T) {
m, err := o.Metadata(ctx)
require.NoError(t, err)
assert.NotNil(t, m)
// All OSes have these
checkInt(m, "mode", 8)
checkTime(m, "mtime", when)
assert.Equal(t, len(whenRFC), len(m["mtime"]))
assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])
if hasAtime {
checkTime(m, "atime", when)
}
if hasBtime {
checkTime(m, "btime", when)
}
if hasXID {
checkInt(m, "uid", 10)
checkInt(m, "gid", 10)
}
})
t.Run("Write", func(t *testing.T) {
newAtimeString := "2011-12-13T14:15:16.999999999Z"
newAtime := fstest.Time(newAtimeString)
newMtimeString := "2011-12-12T14:15:16.999999999Z"
newMtime := fstest.Time(newMtimeString)
newBtimeString := "2011-12-11T14:15:16.999999999Z"
newBtime := fstest.Time(newBtimeString)
newM := fs.Metadata{
"mtime": newMtimeString,
"atime": newAtimeString,
"btime": newBtimeString,
// Can't test uid, gid without being root
"mode": "0767",
"potato": "wedges",
}
err := o.writeMetadata(newM)
require.NoError(t, err)
m, err := o.Metadata(ctx)
require.NoError(t, err)
assert.NotNil(t, m)
mode := checkInt(m, "mode", 8)
if runtime.GOOS != "windows" {
assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
}
checkTime(m, "mtime", newMtime)
if hasAtime {
checkTime(m, "atime", newAtime)
}
if haveSetBTime {
checkTime(m, "btime", newBtime)
}
if xattrSupported {
assert.Equal(t, "wedges", m["potato"])
}
})
}
func TestFilter(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
when := time.Now()
r.WriteFile("included", "included file", when)
r.WriteFile("excluded", "excluded file", when)
f := r.Flocal.(*Fs)
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included"))
require.NoError(t, fi.AddRule("- *"))
// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, "[excluded included]", fmt.Sprint(entries))
// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)
// Check listing with use filter flag
entries, err = f.List(ctx, "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, "[included]", fmt.Sprint(entries))
}
func TestFilterSymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
r.WriteFile("included.file", "included file", when)
r.WriteFile("included.dir/included.sub.file", "included sub file", when)
require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
// Set fs into "-l" mode
// f.opt.FollowSymlinks = false
// f.opt.TranslateSymlinks = true
// f.lstat = os.Lstat
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Reset global error count
accounting.Stats(ctx).ResetErrors()
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included.file"))
require.NoError(t, fi.AddRule("+ included.file.link"))
require.NoError(t, fi.AddRule("+ included.dir/**"))
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
require.NoError(t, fi.AddRule("- *"))
// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
// Check 1 global error, one for each dangling symlink
assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
accounting.Stats(ctx).ResetErrors()
sort.Sort(entries)
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)
// Check listing with use filter flag
entries, err = f.List(ctx, "")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
// Check listing through a symlink still works
entries, err = f.List(ctx, "included.dir")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
}

View File

@@ -11,7 +11,8 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "",
NilObject: (*local.Object)(nil),
RemoteName: "",
NilObject: (*local.Object)(nil),
QuickTestOK: true,
})
}

backend/local/metadata.go (new file, 138 lines)
View File

@@ -0,0 +1,138 @@
package local
import (
"fmt"
"os"
"runtime"
"strconv"
"time"
"github.com/rclone/rclone/fs"
)
const metadataTimeFormat = time.RFC3339Nano
// system metadata keys which this backend owns
//
// not all values supported on all OSes
var systemMetadataInfo = map[string]fs.MetadataHelp{
"mode": {
Help: "File type and mode",
Type: "octal, unix style",
Example: "0100664",
},
"uid": {
Help: "User ID of owner",
Type: "decimal number",
Example: "500",
},
"gid": {
Help: "Group ID of owner",
Type: "decimal number",
Example: "500",
},
"rdev": {
Help: "Device ID (if special file)",
Type: "hexadecimal",
Example: "1abc",
},
"atime": {
Help: "Time of last access",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
"mtime": {
Help: "Time of last modification",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
"btime": {
Help: "Time of file birth (creation)",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
}
// parse a time string from metadata with key
func (o *Object) parseMetadataTime(m fs.Metadata, key string) (t time.Time, ok bool) {
value, ok := m[key]
if ok {
var err error
t, err = time.Parse(metadataTimeFormat, value)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
ok = false
}
}
return t, ok
}
// parse an int from metadata with key and base
func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result int, ok bool) {
value, ok := m[key]
if ok {
var err error
result64, err := strconv.ParseInt(value, base, 64)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
ok = false
}
result = int(result64)
}
return result, ok
}
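// Illustrative example (hypothetical values): with m = fs.Metadata{"mode": "0767"},
// o.parseMetadataInt(m, "mode", 8) returns (503, true) since "0767" is parsed as
// octal, and o.parseMetadataInt(fs.Metadata{"uid": "500"}, "uid", 10) returns (500, true).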
// Write the metadata into the file
//
// It isn't possible to set the ctime and btime under Unix
func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
var err error
atime, atimeOK := o.parseMetadataTime(m, "atime")
mtime, mtimeOK := o.parseMetadataTime(m, "mtime")
btime, btimeOK := o.parseMetadataTime(m, "btime")
if atimeOK || mtimeOK {
if atimeOK && !mtimeOK {
mtime = atime
}
if !atimeOK && mtimeOK {
atime = mtime
}
err = o.setTimes(atime, mtime)
if err != nil {
outErr = fmt.Errorf("failed to set times: %w", err)
}
}
if haveSetBTime {
if btimeOK {
err = setBTime(o.path, btime)
if err != nil {
outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
}
}
}
uid, hasUID := o.parseMetadataInt(m, "uid", 10)
gid, hasGID := o.parseMetadataInt(m, "gid", 10)
if hasUID {
// FIXME should read UID and GID of current user and only attempt to set it if different
if !hasGID {
gid = uid
}
if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
} else {
err = os.Chown(o.path, uid, gid)
if err != nil {
outErr = fmt.Errorf("failed to change ownership: %w", err)
}
}
}
mode, hasMode := o.parseMetadataInt(m, "mode", 8)
if hasMode {
err = os.Chmod(o.path, os.FileMode(mode))
if err != nil {
outErr = fmt.Errorf("failed to change permissions: %w", err)
}
}
// FIXME not parsing rdev yet
return outErr
}
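// Illustrative example (hypothetical values): a metadata map as consumed by
// writeMetadataToFile might look like
//
//	fs.Metadata{
//		"mtime": "2011-12-12T14:15:16.999999999Z", // RFC 3339, metadataTimeFormat
//		"atime": "2011-12-13T14:15:16.999999999Z",
//		"mode":  "0767",                           // octal, as in systemMetadataInfo
//		"uid":   "500",
//		"gid":   "500",
//	}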

View File

@@ -0,0 +1,38 @@
//go:build darwin || freebsd || netbsd
// +build darwin freebsd netbsd
package local
import (
"fmt"
"syscall"
"time"
"github.com/rclone/rclone/fs"
)
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)
if err != nil {
return err
}
stat, ok := info.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(o, "didn't return Stat_t as expected")
return nil
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev != 0 {
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
}
setTime := func(key string, t syscall.Timespec) {
m.Set(key, time.Unix(t.Unix()).Format(metadataTimeFormat))
}
setTime("atime", stat.Atimespec)
setTime("mtime", stat.Mtimespec)
setTime("btime", stat.Birthtimespec)
return nil
}

View File

@@ -0,0 +1,102 @@
//go:build linux
// +build linux
package local
import (
"fmt"
"sync"
"time"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/unix"
)
var (
statxCheckOnce sync.Once
readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
)
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
statxCheckOnce.Do(func() {
// Check statx() is available as it was only introduced in kernel 4.11
// If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
var stat unix.Statx_t
if unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
readMetadataFromFileFn = readMetadataFromFileStatx
} else {
readMetadataFromFileFn = readMetadataFromFileFstatat
}
})
return readMetadataFromFileFn(o, m)
}
// Read the metadata from the file into metadata where possible
func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks {
flags = 0
}
var stat unix.Statx_t
// statx() was added to Linux in kernel 4.11
err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
unix.STATX_TYPE | // Want stx_mode & S_IFMT
unix.STATX_MODE | // Want stx_mode & ~S_IFMT
unix.STATX_UID | // Want stx_uid
unix.STATX_GID | // Want stx_gid
unix.STATX_ATIME | // Want stx_atime
unix.STATX_MTIME | // Want stx_mtime
unix.STATX_CTIME | // Want stx_ctime
unix.STATX_BTIME), // Want stx_btime
&stat)
if err != nil {
return err
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev_major != 0 || stat.Rdev_minor != 0 {
m.Set("rdev", fmt.Sprintf("%x", uint64(stat.Rdev_major)<<32|uint64(stat.Rdev_minor)))
}
setTime := func(key string, t unix.StatxTimestamp) {
m.Set(key, time.Unix(t.Sec, int64(t.Nsec)).Format(metadataTimeFormat))
}
setTime("atime", stat.Atime)
setTime("mtime", stat.Mtime)
setTime("btime", stat.Btime)
return nil
}
// Read the metadata from the file into metadata where possible
func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks {
flags = 0
}
var stat unix.Stat_t
// fstatat() was added to Linux in kernel 2.6.16
// Go only supports 2.6.32 or later
err = unix.Fstatat(unix.AT_FDCWD, o.path, &stat, flags)
if err != nil {
return err
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev != 0 {
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
}
setTime := func(key string, t unix.Timespec) {
// The types of t.Sec and t.Nsec vary from int32 to int64 on
// different Linux architectures so we need to cast them to
// int64 here and hence need to quiet the linter about
// unnecessary casts.
//
// nolint: unconvert
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
}
setTime("atime", stat.Atim)
setTime("mtime", stat.Mtim)
return nil
}

View File

@@ -0,0 +1,21 @@
//go:build plan9 || js
// +build plan9 js
package local
import (
"fmt"
"github.com/rclone/rclone/fs"
)
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)
if err != nil {
return err
}
m.Set("mode", fmt.Sprintf("%0o", info.Mode()))
m.Set("mtime", info.ModTime().Format(metadataTimeFormat))
return nil
}

View File

@@ -0,0 +1,37 @@
//go:build openbsd || solaris
// +build openbsd solaris
package local
import (
"fmt"
"syscall"
"time"
"github.com/rclone/rclone/fs"
)
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)
if err != nil {
return err
}
stat, ok := info.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(o, "didn't return Stat_t as expected")
return nil
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev != 0 {
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
}
setTime := func(key string, t syscall.Timespec) {
m.Set(key, time.Unix(t.Unix()).Format(metadataTimeFormat))
}
setTime("atime", stat.Atim)
setTime("mtime", stat.Mtim)
return nil
}

Some files were not shown because too many files have changed in this diff.