mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits


589 Commits

Author SHA1 Message Date
Nick Craig-Wood
583eee635f Version v1.64.1 2023-10-17 16:50:14 +01:00
Nick Craig-Wood
ba836e729e mount: fix automount not detecting drive is ready
With automount the target mount drive appears twice in /proc/self/mountinfo.

    379 27 0:70 / /mnt/rclone rw,relatime shared:433 - autofs systemd-1 rw,fd=57,...
    566 379 0:90 / /mnt/rclone rw,nosuid,nodev,relatime shared:488 - fuse.rclone remote: rw,...

Before this fix rclone only looked for the mount once in
/proc/self/mountinfo. It found the automount line and, since this
doesn't have fs type rclone, concluded the mount wasn't ready yet.

This patch makes rclone look through all the mounts and if any of them
have fs type rclone it concludes the mount is ready.

See: https://forum.rclone.org/t/systemd-mount-works-but-automount-does-not/42287/
2023-10-16 12:14:34 +01:00
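A minimal Go sketch of the check described above (illustrative only, not rclone's actual mount code): report the mount ready if any /proc/self/mountinfo entry for the mountpoint has an rclone FUSE filesystem type.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// mountReady scans every mountinfo entry for the mountpoint instead of
// stopping at the first match, so the autofs entry no longer hides the
// fuse.rclone entry.
func mountReady(mountpoint string) bool {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return false
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		// field 5 is the mount point; the fs type follows the "-" separator
		if len(fields) < 5 || fields[4] != mountpoint {
			continue
		}
		for i, field := range fields {
			if field == "-" && i+1 < len(fields) &&
				strings.HasPrefix(fields[i+1], "fuse.rclone") {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println(mountReady("/mnt/rclone"))
}
```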
Nick Craig-Wood
a6db2e7320 serve sftp: return not supported error for not supported commands
Before this change, if a hardlink command was issued, rclone would
just ignore it and not return an error.

This changes any unknown operations (including hardlink) to return an
unsupported error.
2023-10-16 12:10:02 +01:00
Nick Craig-Wood
31486aa7e3 b2: fix chunked streaming uploads
Streaming uploads are used by rclone rcat and rclone mount
--vfs-cache-mode off.

After the multipart chunker refactor the multipart chunked streaming
upload was accidentally mixing the first and the second parts up which
was causing corrupted uploads.

This was caused by a simple off by one error in the refactoring where
we went from 1 based part number counting to 0 based part number
counting.

Fixing this revealed that the metadata wasn't being re-read for the
copied object either.

This fixes both of those issues and adds an integration test so it
won't happen again.

Fixes #7367
2023-10-14 13:06:08 +01:00
Nick Craig-Wood
d24e5bc7e4 build: upgrade golang.org/x/net to v0.17.0 to fix HTTP/2 rapid reset
Vulnerability #1: GO-2023-2102

HTTP/2 rapid reset can cause excessive work in net/http

More info: https://pkg.go.dev/vuln/GO-2023-2102
2023-10-12 20:08:30 +01:00
Nick Craig-Wood
077c1a0f57 b2: fix server side copies greater than 4GB
After the multipart chunker refactor the multipart chunked server side
copy was accidentally sending one part too many. The last part was 0
length which was rejected by b2.

This was caused by a simple off by one error in the refactoring where
we went from 1 based part number counting to 0 based part number
counting.

Fixing this revealed that the metadata wasn't being re-read for the
copied object either.

This fixes both of those issues and adds an integration test so it
won't happen again.

See: https://forum.rclone.org/t/large-server-side-copy-in-b2-fails-due-to-bad-byte-range/42294
2023-10-12 20:08:24 +01:00
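A minimal illustration (not rclone's code) of how this kind of 1-based to 0-based off-by-one produces one part too many, with the spurious final part having 0 length:

```go
package main

import "fmt"

func main() {
	const size, chunk = 10, 5
	parts := (size + chunk - 1) / chunk    // 2 parts are needed
	for part := 0; part <= parts; part++ { // BUG: "<=" with 0-based parts gives one part too many
		offset := part * chunk
		length := chunk
		if offset+length > size {
			length = size - offset // the extra final part ends up 0 length
		}
		fmt.Printf("part %d: offset=%d length=%d\n", part, offset, length)
	}
	// The correct loop condition for 0-based part numbers is "part < parts".
}
```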
Nick Craig-Wood
3bb82b4dd5 cmd: Make --progress output logs in the same format as without
See: https://forum.rclone.org/t/using-progress-change-dates-from-2023-10-05-to-2023-10-05/42173
2023-10-11 12:13:55 +01:00
Nick Craig-Wood
1591592936 operations: fix error message on delete to have file name - fixes #7355 2023-10-11 12:13:55 +01:00
Vitor Gomes
cc036884d4 operations: fix OpenOptions ignored in copy if operation was a multiThreadCopy 2023-10-11 12:13:55 +01:00
Nick Craig-Wood
f6b9fdf7c6 build: fix docker beta build running out of space
This removes some unused SDKs from the build machine to free some
space up before building. It also adds some lines to show the free
space.
2023-10-11 12:13:55 +01:00
Nick Craig-Wood
c9fe2f75a8 oracleobjectstorage: fix OpenOptions being ignored in uploadMultipart with chunkWriter 2023-10-11 12:13:55 +01:00
Vitor Gomes
340a67c012 s3: fix OpenOptions being ignored in uploadMultipart with chunkWriter 2023-10-11 12:13:55 +01:00
Saleh Dindar
264b3f0c90 vfs: [bugfix] Update dir modification time
This fixes a subtle bug where the dir modification time was not updated when the
dir already existed in the cache. It is only noticeable when clients use dir
modification times to invalidate their caches.
2023-10-11 12:13:55 +01:00
Nick Craig-Wood
a7978cea56 operations: close file in TestUploadFile test so it can be deleted on Windows 2023-10-11 12:13:55 +01:00
Nick Craig-Wood
bebd82c586 b2: reduce default --b2-upload-concurrency to 4 to reduce memory usage
In v1.63 memory usage in the b2 backend was limited to `--transfers` *
`--b2-chunk-size`

However in v1.64 this was raised to `--transfers` * `--b2-chunk-size`
* `--b2-upload-concurrency`.

The default value for this was accidentally set quite high at 16, which
means by default rclone could use up to 6.4GB of memory!

The new default sets a more reasonable (but still high) max memory of 1.6GB.
2023-10-11 12:13:55 +01:00
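As a rough worked example, assuming the default --transfers of 4 and the default --b2-chunk-size of 96M (these defaults are assumptions here, not stated in the commit):

    4 transfers x 96 MiB chunks x 16 concurrency ~ 6.4 GB peak memory (old default)
    4 transfers x 96 MiB chunks x  4 concurrency ~ 1.6 GB peak memory (new default)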
Nick Craig-Wood
af02c3b2a7 b2: fix locking window when getting multipart upload URL
Before this change, the lock was held while the upload URL was being
fetched from the server.

This meant that any other threads were blocked from getting upload
URLs unnecessarily.

It also increased the potential for deadlock.
2023-10-11 12:13:55 +01:00
Nick Craig-Wood
77dfe5f1fd pacer: fix b2 deadlock by defaulting max connections to unlimited
Before this change, the maximum number of connections was set to 10.

This meant that b2 could deadlock while uploading multipart uploads
due to a lock being held longer than it should have been.
2023-10-11 12:13:55 +01:00
Nick Craig-Wood
e9a95a78de s3: fix slice bounds out of range error when listing
In this commit:

5f938fb9ed s3: fix "Entry doesn't belong in directory" errors when using directory markers

We checked that the remote has the prefix and then changed the remote
before removing the prefix. This sometimes causes:

    panic: runtime error: slice bounds out of range [56:55]

The fix is to do the modification of the remote after removing the
prefix.

See: https://forum.rclone.org/t/cryptcheck-panic-runtime-error-slice-bounds-out-of-range/41977
2023-10-11 12:13:55 +01:00
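An illustrative sketch (not rclone's code) of why the ordering matters: if the remote is transformed before the prefix is stripped, slicing by the prefix length can go out of range, whereas stripping the prefix first avoids it.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	const prefix = "bucket/dir/"
	remote := "bucket/dir/a"

	// Buggy order: transform the remote first, then slice by the prefix
	// length. If the transformation shortened the string, the slice
	// remote[len(prefix):] panics with "slice bounds out of range".
	//
	// Safe order: strip the prefix while the earlier check still holds,
	// then transform what is left.
	if strings.HasPrefix(remote, prefix) {
		remote = strings.TrimPrefix(remote, prefix)
	}
	fmt.Println(remote) // "a"
}
```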
Nick Craig-Wood
82ca5295f4 docs: fix backend doc generator to not output duplicate config names
This was always the intention, it was just implemented wrong.

This shortens the s3 docs by 1369 lines, bringing them down to just
about half the size.

Fixes #7325
2023-10-11 12:13:55 +01:00
Dimitri Papadopoulos Orfanos
9d8a40b813 docs: fix typos found by codespell in docs and code comments 2023-10-11 12:13:55 +01:00
Nick Craig-Wood
12d80c5219 onedrive: fix the configurator to allow /teams/ID in the config
See: https://forum.rclone.org/t/sharepoint-to-google/41548/
2023-10-11 12:08:40 +01:00
Nick Craig-Wood
038a87c569 lsjson: make sure we set the global metadata flag too 2023-10-11 12:08:40 +01:00
Nick Craig-Wood
3ef97993ad b2: fix multipart upload: corrupted on transfer: sizes differ XXX vs 0
Before this change the b2 backend wasn't writing the metadata to the
object properly after a multipart upload.

The symptom of this was that sometimes it would give the error:

    corrupted on transfer: sizes differ XXX vs 0

This was fixed by returning the metadata in the chunk writer and setting it in Update.

See: https://forum.rclone.org/t/multipart-upload-to-b2-sometimes-failing-with-corrupted-on-transfer-sizes-differ/41829
2023-10-11 12:08:40 +01:00
Nick Craig-Wood
04bba67cd5 azureblob: fix "fatal error: concurrent map writes"
Before this change, the metadata map could be accessed from multiple
goroutines at once, sometimes causing this error.

This fix adds a global mutex for adjusting the metadata map to make
all accesses safe.

See: https://forum.rclone.org/t/azure-blob-storage-with-vfs-cache-concurrent-map-writes-exception/41686
2023-10-11 12:08:40 +01:00
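A minimal sketch of the general fix (illustrative, not the azureblob backend's actual code): guard every write to a shared metadata map with a mutex so concurrent goroutines cannot trigger "fatal error: concurrent map writes".

```go
package main

import (
	"fmt"
	"sync"
)

var (
	metadataMu sync.Mutex // protects metadata from concurrent access
	metadata   = map[string]string{}
)

func setMetadata(key, value string) {
	metadataMu.Lock()
	defer metadataMu.Unlock()
	metadata[key] = value
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			setMetadata(fmt.Sprintf("key%d", i), "value") // safe from many goroutines
		}(i)
	}
	wg.Wait()
	fmt.Println(len(metadata))
}
```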
dependabot[bot]
29dd29b9f3 build(deps): bump docker/setup-qemu-action from 2 to 3
Bumps [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) from 2 to 3.
- [Release notes](https://github.com/docker/setup-qemu-action/releases)
- [Commits](https://github.com/docker/setup-qemu-action/compare/v2...v3)

---
updated-dependencies:
- dependency-name: docker/setup-qemu-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-10-11 12:08:40 +01:00
dependabot[bot]
532248352b build(deps): bump docker/setup-buildx-action from 2 to 3
Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 2 to 3.
- [Release notes](https://github.com/docker/setup-buildx-action/releases)
- [Commits](https://github.com/docker/setup-buildx-action/compare/v2...v3)

---
updated-dependencies:
- dependency-name: docker/setup-buildx-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-10-11 12:08:40 +01:00
Kaloyan Raev
ab803942de storj: update storj.io/uplink to v1.12.0
The improved upload logic is active by default in uplink v1.12.0, so the
`testuplink.WithConcurrentSegmentUploadsDefaultConfig(ctx)` is not
required anymore.

See https://github.com/rclone/rclone/pull/7198
2023-10-11 12:08:40 +01:00
Nick Craig-Wood
f933e80258 docs: add notes on how to update the website between releases 2023-10-11 12:08:40 +01:00
Nick Craig-Wood
1c6f0101a5 docs: remove minio sponsor box for the moment 2023-10-11 12:08:40 +01:00
Nick Craig-Wood
c6f161de90 docs: update Storj partner link 2023-10-11 12:08:40 +01:00
Herby Gillot
bdcf7fe28c docs: add MacPorts install info
https://ports.macports.org/port/rclone/
2023-10-11 12:08:40 +01:00
dependabot[bot]
776dc47eb8 build(deps): bump docker/metadata-action from 4 to 5
Bumps [docker/metadata-action](https://github.com/docker/metadata-action) from 4 to 5.
- [Release notes](https://github.com/docker/metadata-action/releases)
- [Upgrade guide](https://github.com/docker/metadata-action/blob/master/UPGRADE.md)
- [Commits](https://github.com/docker/metadata-action/compare/v4...v5)

---
updated-dependencies:
- dependency-name: docker/metadata-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-10-11 12:08:40 +01:00
dependabot[bot]
167046e21a build(deps): bump docker/login-action from 2 to 3
Bumps [docker/login-action](https://github.com/docker/login-action) from 2 to 3.
- [Release notes](https://github.com/docker/login-action/releases)
- [Commits](https://github.com/docker/login-action/compare/v2...v3)

---
updated-dependencies:
- dependency-name: docker/login-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-10-11 12:08:40 +01:00
dependabot[bot]
98d50d545a build(deps): bump docker/build-push-action from 4 to 5
Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 4 to 5.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](https://github.com/docker/build-push-action/compare/v4...v5)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-10-11 12:08:40 +01:00
Manoj Ghosh
48242c5357 fix overview of oracle object storage as it supports multithreaded 2023-10-11 12:08:40 +01:00
Pat Patterson
e437e6c209 operations: ensure concurrency is no greater than the number of chunks - fixes #7299 2023-10-11 12:08:40 +01:00
Nick Craig-Wood
b813a01718 Start v1.64.1-DEV development 2023-10-11 11:55:24 +01:00
Nick Craig-Wood
77f7bb08af Version v1.64.0 2023-09-11 15:59:44 +01:00
Nick Craig-Wood
a5a61f4874 protondrive: make cached keys rclone style and not show with rclone config redacted 2023-09-11 15:57:08 +01:00
Nick Craig-Wood
e8879f3e77 docs: document release signing and verification
Fixes #7257
2023-09-11 12:28:23 +01:00
Nick Craig-Wood
2a6675cffd docs: fix typo in rc docs - fixes #7287 2023-09-11 09:34:22 +01:00
Nick Craig-Wood
fa4d171f62 protondrive: complete docs with all references to Proton Drive 2023-09-10 11:57:39 +01:00
Nick Craig-Wood
d4d530bd8e drive: add --drive-fast-list-bug-fix to control ListR bug workaround
See: https://forum.rclone.org/t/how-to-list-empty-directories-recursively/40995/12
2023-09-09 17:46:03 +01:00
Nick Craig-Wood
f4b011e4e4 s3: add rclone backend restore-status command
This command shows the restore status of objects being retrieved from GLACIER.

See: https://forum.rclone.org/t/aws-s3-glacier-monitor-restore-status-command-for-glacier-restoring-process/41373/7
2023-09-09 17:44:36 +01:00
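A hedged usage sketch of the new command (the bucket path is illustrative; check `rclone backend help restore-status s3:` for the authoritative syntax and options):

    rclone backend restore-status s3:bucket/path/to/objects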
Nick Craig-Wood
d80890bf32 Add Drew Stinnett to contributors 2023-09-09 17:44:36 +01:00
Nick Craig-Wood
39392d70dd Add David Pedersen to contributors 2023-09-09 17:44:36 +01:00
Drew Stinnett
643386f026 rc: Add operations/settier to API 2023-09-09 17:41:02 +01:00
Chun-Hung Tseng
ed755bf04f protondrive: implement two-password mode (#7279) 2023-09-08 22:54:46 +02:00
David Pedersen
071c3f28e5 vfs: Update parent directory modtimes on vfs actions
This isn't written back to the storage so might change on remount but
makes the VFS just a little more POSIX compatible.
2023-09-08 17:19:52 +01:00
Nick Craig-Wood
7453b7d5f3 hdfs: fix retry "replication in progress" errors when uploading
Before this change uploaded files could return the error "replication
in progress".

This error is harmless though and means the Close should be retried
which is what this patch does.
2023-09-08 15:35:50 +01:00
Nick Craig-Wood
c9350149d8 hdfs: fix uploading to the wrong object on Update with overridden remote name
In the commit below we discovered a problem with objects being uploaded to
the incorrect object name, and added an integration test for the
problem.

65b2e378e0 drive: fix incorrect remote after Update on object

This test was tripped by the hdfs backend and this patch fixes the
problem.
2023-09-08 15:35:50 +01:00
Nick Craig-Wood
08789a5815 test_all: remove filefabric from integration tests
The filefabric test server doesn't seem to be working at all so remove
it from the integration tests pending revitalization.
2023-09-08 15:35:50 +01:00
Nick Craig-Wood
4037af9c1a Add Oksana and Volodymyr Kit to contributors 2023-09-08 15:35:50 +01:00
Oksana
628ff8e524 quatrix: add backend to support Quatrix
Co-authored-by: Volodymyr Kit <v.kit@maytech.net>
2023-09-08 14:31:29 +01:00
Chun-Hung Tseng
578c75cb1e protondrive: fix signature verification logic by accounting for legacy signing scheme (#7278) 2023-09-08 16:00:34 +08:00
Nick Craig-Wood
63ab250817 vfs: add --vfs-cache-min-free-space to control minimum free space on the disk containing the cache
See: https://forum.rclone.org/t/rclone-fails-to-control-disk-usage-and-its-filling-the-disk-to-100/41494/
2023-09-07 15:57:45 +01:00
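An illustrative invocation of the new flag (the mountpoint and the 1G threshold are example values, not from the commit):

    rclone mount remote: /mnt/remote --vfs-cache-mode full --vfs-cache-min-free-space 1G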
Nick Craig-Wood
39f910a65d rc: add core/du to measure local disk usage 2023-09-07 15:57:45 +01:00
Nick Craig-Wood
0fb36562dd Add lib/diskusage to measure used/free on disks 2023-09-07 15:57:45 +01:00
Nick Craig-Wood
8c25a15a40 Add zjx20 to contributors 2023-09-07 15:57:45 +01:00
zjx20
f5ee16e201 local: rmdir return an error if the path is not a dir 2023-09-07 14:30:08 +01:00
Nick Craig-Wood
2bcbed30bd s3: implement backend set command to update running config 2023-09-07 12:26:48 +01:00
Chun-Hung Tseng
5026a9171d protondrive: improves 2fa and draft error messages (#7280) 2023-09-07 01:50:28 +08:00
Nick Craig-Wood
b750c50bfd zoho: remove Range requests workarounds to fix integration tests
Zoho are now responding to Range requests properly. The remnants of
our old workaround were breaking the integration tests, so this removes
them.
2023-09-05 18:21:15 +01:00
Nick Craig-Wood
535acd0483 fstests: fix PublicLink failing on storj
Storj requires a minimum duration of 1 minute for the link expiry so
increase what we are asking for from 1 minute to 2 minutes.
2023-09-05 18:01:37 +01:00
Nick Craig-Wood
db37b3ef9e opendrive: fix List on a just deleted and remade directory
Sometimes opendrive reports "403 Folder is already deleted" on
directories which should exist.

This might be a bug in opendrive or in rclone; however, the work-around
here is sufficient to get the tests passing.
2023-09-05 17:59:03 +01:00
Nick Craig-Wood
257607ab3d operations: fix TestCopyFileMaxTransfer test to not be quite so fussy 2023-09-05 17:42:17 +01:00
Nick Craig-Wood
3ea1c5c4d2 compress: fix ChangeNotify
ChangeNotify has been broken on the compress backend for a long time!

Before this change it was wrapping the file names received rather than
unwrapping them to discover the original names.

It is likely ChangeNotify was working adequately though for users as
the VFS just uses the directories rather than the file names.
2023-09-05 17:22:36 +01:00
Nick Craig-Wood
bd23ea028e azureblob: fix purging with directory markers 2023-09-05 17:07:44 +01:00
Nick Craig-Wood
c58d4fe939 test_all: ignore Rmdirs test failure on b2 as it fails because of versions 2023-09-05 15:47:14 +01:00
nielash
ddc7059a73 Add @nielash as bisync maintainer 2023-09-05 04:05:39 -04:00
dependabot[bot]
2677c43f26 build(deps): bump actions/checkout from 3 to 4
Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v3...v4)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-09-05 08:55:32 +01:00
nielash
48ab67f090 bisync: fix dryRun rc parameter being ignored
Before this change, bisync ignored the dryRun parameter (only when specified
via the rc.)

This change fixes the issue, so that the dryRun rc parameter is equivalent to
the --dry-run flag.
2023-09-05 08:53:58 +01:00
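A hedged example over the rc, assuming the sync/bisync endpoint and illustrative paths; after this change dryRun=true behaves like the --dry-run flag:

    rclone rc sync/bisync path1=remote1:dir path2=remote2:dir dryRun=true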
nielash
089df7d977 bisync: add rc parameters for new flags
Added rc support for the flags recently introduced in #6971.

createEmptySrcDirs
ignoreListingChecksum
resilient
2023-09-05 08:53:58 +01:00
Nick Craig-Wood
4fbe0652c9 compress: fix integration tests by adding missing OpenChunkWriter exclude 2023-09-04 19:26:14 +01:00
Nick Craig-Wood
47665dad07 cache: fix integration tests by adding missing OpenChunkWriter exclude 2023-09-04 19:26:14 +01:00
eNV25
ad724463a5 cmd: refactor and use sysdnotify in more commands
* cmd: refactor and use sysdnotify in more commands

Fixes #5117
2023-09-04 16:32:04 +01:00
Nick Craig-Wood
6afd7088d3 box: add --box-impersonate to impersonate a user ID - fixes #7267 2023-09-04 12:09:54 +01:00
Nick Craig-Wood
b33140ddeb union: add :writeback to act as a simple cache
This adds a :writeback tag to upstreams. If set on a single upstream
then it writes back objects not found into that upstream.

Fixes #6934
2023-09-04 12:03:26 +01:00
Nick Craig-Wood
b1c0ae5e7d azureblob: fix creation of directory markers
This also fixes the integration tests which is why we didn't notice this before!
2023-09-03 18:09:31 +01:00
Nick Craig-Wood
40bcc7a90b fstest: fix sftp ssh integration tests
This adds a private and public key to the SFTP SSH test so that it
works when it doesn't have access to my ssh agent!
2023-09-03 15:23:12 +01:00
Nick Craig-Wood
be17f1523a b2: fix ChunkWriter size return 2023-09-03 13:53:11 +01:00
Nick Craig-Wood
bb58040d9c s3: fix multipart streaming uploads of 0 length files 2023-09-03 12:37:20 +01:00
Nick Craig-Wood
2db0e23584 backends: change OpenChunkWriter interface to allow backend concurrency override
Before this change the concurrency used for an upload was rather
inconsistent.

- if size below `--backend-upload-cutoff` (default 200M) do single part upload.

- if size below `--multi-thread-cutoff` (default 256M) or using streaming
  uploads (eg `rclone rcat`) do multipart upload using
  `--backend-upload-concurrency` to set the concurrency used by the uploader.

- otherwise do multipart upload using `--multi-thread-streams` to set the
  concurrency.

This change makes the default for the concurrency used be the
`--backend-upload-concurrency`. If `--multi-thread-streams` is set and larger
than the `--backend-upload-concurrency` then that will be used instead.

This means that if the user sets `--backend-upload-concurrency` then it will be
obeyed for all multipart/multi-thread transfers and the user can override them
all with `--multi-thread-streams`.

See: #7056
2023-09-03 11:47:05 +01:00
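A minimal sketch of the default selection rule described above (illustrative names, not rclone's code): start from the backend's upload concurrency and let a larger --multi-thread-streams override it.

```go
package main

import "fmt"

// chooseConcurrency mirrors the rule described above: obey the backend's
// upload concurrency, unless --multi-thread-streams is set higher.
func chooseConcurrency(backendUploadConcurrency, multiThreadStreams int) int {
	if multiThreadStreams > backendUploadConcurrency {
		return multiThreadStreams
	}
	return backendUploadConcurrency
}

func main() {
	fmt.Println(chooseConcurrency(4, 8))  // 8: --multi-thread-streams wins when larger
	fmt.Println(chooseConcurrency(16, 4)) // 16: the backend concurrency is obeyed otherwise
}
```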
Nick Craig-Wood
a7337b0a95 Add Alishan Ladhani to contributors 2023-09-03 11:47:05 +01:00
Alishan Ladhani
7821cb884d b2: fix rclone link when object path contains special characters
Before this change, b2 would return an error when opening a link
generated by `rclone link`. The following error occurs when the object
path contains an ampersand that is not percent encoded:

{
  "code": "bad_request",
  "message": "Bad character in percent-encoded string: 38 (0x26)",
  "status": 400
}
2023-09-02 18:31:14 +01:00
Nick Craig-Wood
85c29e3629 serve dlna: fix MIME type if backend can't identify it
If the server returns the MIME type as application/octet-stream we
assume it doesn't really know what the MIME type is. In this case this
patch tries matching the MIME type from the file extension instead.

This enables the use of servers (like OneDrive for Business) which
don't allow the setting of MIME types on upload and have a poor
selection of mime types.

Fixes #7259
2023-09-01 18:09:44 +01:00
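A sketch of the fallback described above (illustrative, not the serve dlna code), using Go's standard mime package:

```go
package main

import (
	"fmt"
	"mime"
	"path/filepath"
)

// mimeType keeps a meaningful reported type, but falls back to the file
// extension when the backend only reports application/octet-stream.
func mimeType(reported, name string) string {
	if reported != "" && reported != "application/octet-stream" {
		return reported
	}
	if byExt := mime.TypeByExtension(filepath.Ext(name)); byExt != "" {
		return byExt
	}
	return "application/octet-stream"
}

func main() {
	// typically prints application/pdf rather than application/octet-stream
	fmt.Println(mimeType("application/octet-stream", "report.pdf"))
}
```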
Nick Craig-Wood
b7ec75aab6 docs: add Storj as a sponsor 2023-09-01 18:09:44 +01:00
Nick Craig-Wood
38309f2df2 Add Bjørn Smith to contributors 2023-09-01 18:09:44 +01:00
NoLooseEnds
7487d34c33 jotta: added Telia Sky whitelabel (Norway)
Duplicated the Telia Cloud (Sweden) provider, changed the URLs, and added
teliase and teliano (instead of just telia) to differentiate them.

See: #5153 #5016
2023-09-01 14:55:32 +01:00
kapitainsky
e45cb4fc75 docs: single character remote names in Windows
Clarify how single character remote names are interpreted in Windows (as drive letters)

See: https://forum.rclone.org/t/issue-with-single-character-configuration-on-windows-with-rclone/
2023-09-01 11:00:14 +01:00
Bjørn Smith
21008b4cd5 docs: sftp: add note regarding format of server_command
Elaborate exactly how server_command should be used in the configuration file
2023-09-01 10:57:42 +01:00
Nick Craig-Wood
cffe85e6c5 fshttp: fix --bind 0.0.0.0 allowing IPv6 and --bind ::0 allowing IPv4
Due to a bug/misfeature in the Go standard library described in
https://github.com/golang/go/issues/48723, it binds to both IPv4 and
IPv6 when passed 0.0.0.0 or ::0.

This patch detects the bind address and forces the correct IP
protocol.

Fixes #6124
Fixes #6244
See: https://forum.rclone.org/t/issues-with-bind-0-0-0-0-and-onedrive-getting-etag-mismatch-when-using-ipv6/41379/
2023-09-01 10:47:39 +01:00
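A minimal sketch of forcing the IP protocol (illustrative, not rclone's fshttp code): choose the listen network from the bind address so 0.0.0.0 stays IPv4-only and ::0 stays IPv6-only.

```go
package main

import (
	"fmt"
	"net"
)

// listen picks tcp4 or tcp6 explicitly when the bind address is a literal
// IP, working around the dual-stack behaviour of plain "tcp".
func listen(bindAddr, port string) (net.Listener, error) {
	network := "tcp"
	switch ip := net.ParseIP(bindAddr); {
	case ip == nil:
		// host name - keep the default behaviour
	case ip.To4() != nil:
		network = "tcp4" // e.g. 0.0.0.0 stays IPv4 only
	default:
		network = "tcp6" // e.g. ::0 stays IPv6 only
	}
	return net.Listen(network, net.JoinHostPort(bindAddr, port))
}

func main() {
	l, err := listen("0.0.0.0", "8080")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("listening on", l.Addr(), "(IPv4 only)")
	l.Close()
}
```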
Nick Craig-Wood
d12a92eac9 box: fix unhelpful decoding of error messages into decimal numbers
Before this change the box backend could produce errors like

    Error "not_found" (404): On-Behalf-Of User not found ([123 34 105 110
    118 97 108 105 100 95 117 115 101 114 95 105 100 34 58 123 34 105 100
    34 58 34 48 48 48 48 48 48 48 48 48 48 48 34 125 125])

This fixes it to produce this instead

    Error "not_found" (404): On-Behalf-Of User not found ({"invalid_user_id":{"id":"00000000000"}})
2023-08-31 23:03:27 +01:00
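A sketch of the kind of formatting slip behind this (illustrative): printing a []byte response body with a verb that renders decimal byte values instead of text.

```go
package main

import "fmt"

func main() {
	body := []byte(`{"invalid_user_id":{"id":"00000000000"}}`)
	fmt.Printf("%v\n", body) // [123 34 105 110 ...] - decimal bytes, unreadable
	fmt.Printf("%s\n", body) // {"invalid_user_id":{"id":"00000000000"}} - readable
}
```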
eNV25
11eeaaf792 cmd/ncdu: fix add keybinding to rescan filesystem 2023-08-30 14:29:46 +01:00
David Sze
a603efeaf4 box: add polling support 2023-08-30 09:25:00 +01:00
eNV25
0bd0a992a4 cmd/ncdu: add keybinding to rescan filesystem
Fixes #7255
2023-08-30 09:05:58 +01:00
Justin Hellings
82c8d78a44 docs: may not -> might not, to remove ambiguity
"may not" can be interpreted as "is not allowed".
Replaced with "might not" in both cases to remove ambiguity.
2023-08-29 15:10:50 +01:00
Nick Craig-Wood
a83fec756b build: fix lint errors when re-enabling revive var-naming 2023-08-29 13:03:49 +01:00
Nick Craig-Wood
e953598987 build: fix lint errors when re-enabling revive exported & package-comments 2023-08-29 13:03:13 +01:00
Nick Craig-Wood
feaa20d885 build: re-enable revive linters
In this commit:

75dfdbf211 ci: revert revive settings back to fix lint

We accidentally disabled all the revive linters. Unfortunately setting
the rules clears the default set of rules so it is necessary to
mention all rules that we need.
2023-08-29 13:01:15 +01:00
Nick Craig-Wood
967fc6d7f4 lib/multipart: fix accounting for multipart transfers
This change makes sure the accounting is done when bytes are taken out
of the buffer rather than put in.

See: https://forum.rclone.org/t/improve-transfer-stats-calculation-for-multipart-uploads/41172
2023-08-27 23:10:58 +01:00
Nick Craig-Wood
b95bda1e92 s3: fix purging of root directory with --s3-directory-markers - fixes #7247 2023-08-25 17:39:16 +01:00
Nick Craig-Wood
9c14562850 fstests: add backend integration test for purging root directory #7247 2023-08-25 17:39:07 +01:00
Nick Craig-Wood
f992742404 s3: fix accounting for multipart uploads 2023-08-25 16:31:31 +01:00
Nick Craig-Wood
f2467d07aa oracleobjectstorage: fix accounting for multipart uploads 2023-08-25 16:31:31 +01:00
Nick Craig-Wood
d69cdb79f7 b2: fix accounting for multipart uploads 2023-08-25 16:31:31 +01:00
Nick Craig-Wood
df5d92d709 operations: fix terminology in multi-thread copy 2023-08-25 16:31:31 +01:00
Nick Craig-Wood
1b5b36523b operations: fix accounting for multi-thread transfers
This stops the accounting moving in large chunks and gets it to move
as the data is read out of the buffer.
2023-08-24 17:51:36 +01:00
Nick Craig-Wood
2f424ceecf operations: don't buffer when a backend implements OpenWriterAt
In this case we don't seek on errors so there is no need for buffering.
2023-08-24 17:51:36 +01:00
Nick Craig-Wood
bc986b44b2 lib/pool: add DelayAccounting() to fix accounting when reading hashes 2023-08-24 16:42:09 +01:00
Nick Craig-Wood
f4b1a51af6 lib/pool: add SetAccounting to RW 2023-08-24 15:28:40 +01:00
Manoj Ghosh
25703ad20e oracleobjectstorage: implement OpenChunkWriter and multi-thread uploads #7056 2023-08-24 12:39:28 +01:00
Nick Craig-Wood
ab803d1278 b2: implement OpenChunkWriter and multi-thread uploads #7056
This implements the OpenChunkWriter interface for b2 which
enables multi-thread uploads.

This makes the memory controls of the b2 backend inoperative; they are
replaced with the global ones.

    --b2-memory-pool-flush-time
    --b2-memory-pool-use-mmap

By using the buffered reader this fixes excessive memory use when
uploading large files as it will share memory pages between all
readers.
2023-08-24 12:39:27 +01:00
Nick Craig-Wood
0427177857 azureblob: implement OpenChunkWriter and multi-thread uploads #7056
This implements the OpenChunkWriter interface for azureblob which
enables multi-thread uploads.

This makes the memory controls of the azureblob backend inoperative;
they are replaced with the global ones.

    --azureblob-memory-pool-flush-time
    --azureblob-memory-pool-use-mmap

By using the buffered reader this fixes excessive memory use when
uploading large files as it will share memory pages between all
readers.
2023-08-24 12:39:27 +01:00
Nick Craig-Wood
3dfcfc2caa operations: document multi-thread copy and tweak defaults 2023-08-24 12:39:27 +01:00
Nick Craig-Wood
d4cff1ae19 operations: add abort on exit to multithread copy 2023-08-24 12:39:27 +01:00
Nick Craig-Wood
f5753369e4 operations: multipart: don't buffer transfers to local disk #7056 2023-08-24 12:39:27 +01:00
Nick Craig-Wood
4c76fac594 s3: factor generic multipart upload into lib/multipart #7056
This makes the memory controls of the s3 backend inoperative; they are
replaced with the global ones.

    --s3-memory-pool-flush-time
    --s3-memory-pool-use-mmap

By using the buffered reader this fixes excessive memory use when
uploading large files as it will share memory pages between all
readers.

Fixes #7141
2023-08-24 12:39:27 +01:00
Nick Craig-Wood
0d0bcdac31 fs: add context.Ctx to ChunkWriter methods
WriteChunk in particular needs a different context from that which
OpenChunkWriter was used with so add it to all the methods.
2023-08-24 12:39:27 +01:00
Nick Craig-Wood
f3bd02f0ef operations: fix and tidy multithread code
- fix docs and error messages for multithread
- use sync/errgroup's built-in concurrency limiting
- re-arrange multithread code
- don't continue multi-thread uploads if one part fails
2023-08-24 12:39:27 +01:00
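A minimal sketch of the errgroup built-in concurrency limiting mentioned above (illustrative part numbers; not rclone's multithread code):

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g := new(errgroup.Group)
	g.SetLimit(4) // at most 4 part uploads in flight at once
	for part := 0; part < 10; part++ {
		part := part
		g.Go(func() error {
			// upload the part here; a non-nil return makes g.Wait()
			// report the first failure
			fmt.Println("uploading part", part)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("upload failed:", err)
	}
}
```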
Nick Craig-Wood
e6fde67491 s3: fix retry logic, logging and error reporting for chunk upload
- move retries into correct place into lowest level functions
- fix logging and error reporting
2023-08-24 12:39:27 +01:00
Nick Craig-Wood
b4e3332e02 fs: introduces aliases for OpenWriterAtFn and OpenChunkWriterFn 2023-08-24 12:39:27 +01:00
Nick Craig-Wood
0dea83a4aa pool: add page backed reader/writer for multi thread uploads 2023-08-24 12:39:27 +01:00
Nick Craig-Wood
e8f3f98aa0 lib/readers: add NoSeeker to adapt io.Reader to io.ReadSeeker 2023-08-24 12:39:27 +01:00
Nick Craig-Wood
d61328e459 serve ftp: fix race condition when using the auth proxy
In this commit we introduced a race condition when using the auth
proxy.

94a320f23c serve ftp: update to goftp.io/server v2.0.1

This was due to the re-organisation of the upstream library which made
the driver a singleton rather than per session.

This means that when using the auth proxy we need to keep track of
which VFS to use based on which FTP user is connected.

This also adjusts the locking so that the methods will run
concurrently.
2023-08-23 15:11:47 +01:00
r-ricci
9844704567 docs: remove contributor's old email 2023-08-23 12:31:48 +01:00
Nick Craig-Wood
94a320f23c serve ftp: update to goftp.io/server v2.0.1 - fixes #7237 2023-08-22 17:24:05 +01:00
Nick Craig-Wood
7fc573db27 serve sftp: fix hash calculations with --vfs-cache-mode full
Before this change uploading files with rclone to:

    rclone serve sftp --vfs-cache-mode full

Would return the error:

    command "md5sum XXX" failed with error: unexpected non file

This patch detects that the file is still in the VFS cache and reads
the MD5SUM from there rather than from the remote.

Fixes #7241
2023-08-22 13:18:36 +01:00
Nick Craig-Wood
af95616122 Add Roberto Ricci to contributors 2023-08-22 13:18:29 +01:00
Roberto Ricci
72f9f1e9c0 vfs: make sure struct field is aligned for atomic access 2023-08-22 12:52:13 +01:00
Roberto Ricci
91b8152321 vfs: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
552b6c47ff lib: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
01a155fb00 fs: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
50d0597d56 cmount: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
123a030441 smb: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
28ceb323ee sftp: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
c624dd5c3a seafile: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
a56c11753a local: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
4341d472aa filefabric: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
b6e7148daf box: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
45458f2cdb union: use atomic types 2023-08-22 12:52:13 +01:00
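A minimal sketch of the pattern behind the "use atomic types" commits above (illustrative, not rclone's code): the typed atomics added in Go 1.19 replace a raw int64 field updated with atomic.AddInt64, and handle 64-bit alignment themselves.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type stats struct {
	bytes atomic.Int64 // was: bytes int64 + atomic.AddInt64(&s.bytes, n)
}

func main() {
	var s stats
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			s.bytes.Add(1) // safe concurrent update, no manual alignment care needed
		}()
	}
	wg.Wait()
	fmt.Println(s.bytes.Load()) // 100
}
```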
Nick Craig-Wood
de147b6e54 sftp: fix --sftp-ssh looking for ssh agent - fixes #7235
Before this change if pass was empty this would attempt to connect to
the ssh agent even when using --sftp-ssh.

This patch prevents that.
2023-08-21 17:43:02 +01:00
Nick Craig-Wood
11de137660 sftp: fix spurious warning when using --sftp-ssh
When using --sftp-ssh we were warning about user/host/port even when
they were at their defaults.

See: #7235
2023-08-21 17:43:02 +01:00
Nick Craig-Wood
156c372cd7 sync: fix lockup with --cutoff-mode=soft and --max-duration
Before this change, when using --cutoff-mode=soft and --max-duration
rclone deadlocked when the cutoff limit was reached.

This was because the sync object's Pipe became full and nothing was
emptying it because the cutoff was reached.

This changes the context for putting items into the pipe to be the one
that gets cancelled when the cutoff is reached.

See: https://forum.rclone.org/t/sync-command-hanging-using-cutoff-mode-soft-with-max-duration-time-flags/40866
2023-08-18 17:33:54 +01:00
Nick Craig-Wood
c979cde002 ftp: fix 425 "TLS session of data connection not resumed" errors
As an extra security feature some FTP servers (eg FileZilla) require
that the data connection re-uses the same TLS session as the control
connection. This is a good thing for security.

The message "TLS session of data connection not resumed" means that it
was not done.

The problem turned out to be that rclone was re-using the TLS session
cache between concurrent connections so the resumed TLS data
connection could come from any of the control connections.

This patch makes each TLS connection have its own session cache which
should fix the problem.

This also reverts the ftp library to the upstream version which now
contains all of our patches.

Fixes #7234
2023-08-18 14:44:13 +01:00
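A minimal sketch of the per-connection TLS session cache idea described above (illustrative, not the ftp library's actual code): give each control connection its own session cache so a data connection can only resume its own control connection's session.

```go
package main

import "crypto/tls"

// newTLSConfig builds a TLS config with a session cache that is private to
// one connection, instead of a package-level cache shared by all of them.
func newTLSConfig(serverName string) *tls.Config {
	return &tls.Config{
		ServerName:         serverName,
		ClientSessionCache: tls.NewLRUClientSessionCache(0),
	}
}

func main() {
	a := newTLSConfig("ftp.example.com")
	b := newTLSConfig("ftp.example.com")
	_ = a
	_ = b // a and b no longer share resumable TLS sessions
}
```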
Nick Craig-Wood
03aab1a123 rmdirs: remove directories concurrently controlled by --checkers
See: https://forum.rclone.org/t/how-to-list-empty-directories-recursively/40995
2023-08-18 12:05:15 +01:00
Nick Craig-Wood
dc803b572c Add hideo aoyama to contributors 2023-08-18 12:05:15 +01:00
Nick Craig-Wood
4d19042a61 Add Jacob Hands to contributors 2023-08-18 12:05:15 +01:00
hideo aoyama
923989d1d7 build: add snap installation
I ( @boukendesho ) have volunteered to maintain the snap package so
this adds it back into the installation instructions.

It will set a `snap` tag visible in `rclone version` so we know where
it came from for support queries.
2023-08-18 11:57:25 +01:00
sitiom
cf65e36cf3 ci: change Winget Releaser job to ubuntu-latest 2023-08-17 11:36:28 +01:00
Jacob Hands
cf5457c2cd fs: Fix transferTime not being set in JSON logs
This was unintentionally broken in 04aa696
2023-08-17 11:19:35 +01:00
Jacob Hands
ea4aa696a5 fs: Don't stop calculating average transfer speed until the operation is complete
Currently, the average transfer speed will stop calculating 1 minute
after the last queued transfer completes. This causes the average to
stop calculating when checking is slow and the transfer queue becomes
empty.

This change will require all checks to complete before stopping the
average speed calculation.
2023-08-16 21:43:24 +01:00
Nick Craig-Wood
34195fd3e8 sync: fix erroneous test in TestSyncOverlapWithFilter
In this commit:

432d5d1e20 operations: fix overlapping check on case insensitive file systems

We introduced a test that makes no sense. This happens to pass without --fast-list and fail with it.

This removes the test.
2023-08-13 11:42:31 +01:00
Nick Craig-Wood
40b8167ab4 Add Vitor Gomes to contributors 2023-08-13 11:42:22 +01:00
Nick Craig-Wood
e365f237f5 Add nielash to contributors 2023-08-13 11:41:24 +01:00
Nick Craig-Wood
7d449572bd Add alexia to contributors 2023-08-13 11:41:24 +01:00
Vitor Gomes
181fecaec3 multithread: refactor multithread operation to use OpenChunkWriter if available #7056
If the feature OpenChunkWriter is not available, multithread tries to create an adapter from OpenWriterAt to OpenChunkWriter.
2023-08-12 17:55:01 +01:00
Vitor Gomes
7701d1d33d config: add "multi-thread-chunk-size" flag #7056 2023-08-12 17:55:01 +01:00
Vitor Gomes
6dd736fbdc s3: refactor MultipartUpload to use OpenChunkWriter and ChunkWriter #7056 2023-08-12 17:55:01 +01:00
Vitor Gomes
f36ca0cd25 features: add new interfaces OpenChunkWriter and ChunkWriter #7056 2023-08-12 17:55:01 +01:00
nielash
9b3b1c7067 bisync: typo corrections & other doc improvements 2023-08-12 17:24:21 +01:00
nielash
0dd0d6a13e bisync: Add support for --create-empty-src-dirs - Fixes #6109
Sync creation and deletion of empty directories.
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=3.%20Bisync%20should%20create/delete%20empty%20directories%20as%20sync%20does%2C%20when%20%2D%2Dcreate%2Dempty%2Dsrc%2Ddirs%20is%20passed

Also fixed an issue causing --resync to erroneously delete empty folders and duplicate files unique to Path2
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=2.%20%2D%2Dresync%20deletes%20data%2C%20contrary%20to%20docs
2023-08-12 17:24:21 +01:00
nielash
e5bde42303 bisync: Add experimental --resilient mode to allow recovery from self-correctable errors
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=2.%20Bisync%20should%20be%20more%20resilient%20to%20self%2Dcorrectable%20errors
2023-08-12 17:24:21 +01:00
nielash
f01a50eb47 bisync: Add new --ignore-listing-checksum flag to distinguish from --ignore-checksum
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=6.%20%2D%2Dignore%2Dchecksum%20should%20be%20split%20into%20two%20flags%20for%20separate%20purposes
2023-08-12 17:24:21 +01:00
nielash
5ca61ab705 bisync: equality check before renaming (leave identical files alone)
Improved detection of false positive change conflicts (identical files are now left alone instead of renamed)
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=1.%20Identical%20files%20should%20be%20left%20alone%2C%20even%20if%20new/newer/changed%20on%20both%20sides
2023-08-12 17:24:21 +01:00
nielash
4ac4ce6afd bisync: apply filters correctly during deletes
Fixed an issue causing bisync to consider more files than necessary due to overbroad filters during delete operations
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=5.%20Bisync%20reads%20files%20in%20excluded%20directories%20during%20delete%20operations
2023-08-12 17:24:21 +01:00
nielash
40a874a0d8 bisync: enforce --check-access during --resync
--check-access is now enforced during --resync, preventing data loss in certain user error scenarios
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=%2D%2Dcheck%2Daccess%20doesn%27t%20always%20fail%20when%20it%20should
2023-08-12 17:24:21 +01:00
nielash
f4dd86238d bisync: dry runs no longer commit filter changes
Fixed an issue causing dry runs to inadvertently commit filter changes
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=1.%20Dry%20runs%20are%20not%20completely%20dry
2023-08-12 17:24:21 +01:00
nielash
4f1eafb044 gitignore: add .DS_Store and remove *.log
.DS_Store is an irrelevant file created automatically by macOS.

*.log can't be ignored because bisync uses .log files in its test suite.
2023-08-12 17:24:21 +01:00
alexia
20c9e0cab6 fichier: fix error code parsing
This fixes the following error I encountered:

```
2023/08/09 16:18:49 DEBUG : failed parsing fichier error: strconv.Atoi: parsing "#374": invalid syntax
2023/08/09 16:18:49 DEBUG : pacer: low level retry 1/10 (error HTTP error 403 (403 Forbidden) returned body: "{\"status\":\"KO\",\"message\":\"Flood detected: IP Locked #374\"}")
```
2023-08-11 00:47:01 +09:00
Nick Craig-Wood
9c09cf9cf6 build: update to released go1.21 2023-08-09 22:41:19 +01:00
Nick Craig-Wood
3a3af00180 Add antoinetran to contributors 2023-08-09 22:41:19 +01:00
Nick Craig-Wood
281e0c2d62 Add James Braza to contributors 2023-08-09 22:41:19 +01:00
Nick Craig-Wood
25b81b8789 Add Masamune3210 to contributors 2023-08-09 22:41:19 +01:00
Nick Craig-Wood
90fdd97a7b Add Nihaal Sangha to contributors 2023-08-09 22:41:19 +01:00
Chun-Hung Tseng
3c58e0efe0 protondrive: update the information regarding the advance setting enable_caching (#7202) 2023-08-09 16:01:19 +02:00
antoinetran
db744f64f6 docs: clarify --checksum documentation - Fixes #7145 2023-08-09 20:00:46 +09:00
Nick Craig-Wood
480220a84a docs: add some more docs on making your own backend 2023-08-09 20:00:46 +09:00
James Braza
d0362171cf docs: environment variable remote name only supports letters, digits, or underscores 2023-08-09 11:42:04 +01:00
Masamune3210
45887d11f6 docs: local: fix typo 2023-08-09 11:32:45 +01:00
Eng Zer Jun
c4bad5c1bc lib/rest: remove unnecessary nil check
From the Go docs:

  "A `nil` map is equivalent to an empty map." [1]

Therefore, an additional nil check for `opts.ExtraHeaders` before the loop is
unnecessary because `opts.ExtraHeaders` is a `map`.

[1]: https://go.dev/ref/spec#Map_types

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2023-08-09 19:17:42 +09:00
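A tiny demonstration of the Go property the commit relies on: ranging over a nil map is safe and simply runs zero iterations, so no nil check is needed.

```go
package main

import "fmt"

func main() {
	var extraHeaders map[string]string // nil map
	for k, v := range extraHeaders {
		fmt.Println(k, v) // never reached
	}
	fmt.Println("done, no panic")
}
```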
Nihaal Sangha
40de89df73 drive: fix typo in docs 2023-08-05 12:51:51 +01:00
Manoj Ghosh
27f5297e8d oracleobjectstorage: Use rclone's rate limiter in multipart transfers 2023-08-05 12:09:17 +09:00
Nick Craig-Wood
de185de215 accounting: show server side stats in own lines and not as bytes transferred
Before this change we showed both server side moves and server side
copies as bytes transferred.

This made a nice, easy to use stats display, but also caused confusion
for users who saw unrealistic transfer times. It also caused a problem
with --max-transfer and chunker, which renames each chunk after
uploading, and the renames were counted as transferred bytes.

This patch instead accounts the server side move and copy statistics
as separate lines in the stats display which will only appear if
there are any server side moves / copies. This is also output in the
rc.

This gives users something to look at when transfers are running, which
was the point of the original change, but it now means that transfer
bytes represent data transferred through this rclone instance only.

Fixes #7183
2023-08-05 03:54:01 +01:00
Nick Craig-Wood
d362db2e08 rclone test info: add --check-base32768 flag to check can store all base32768 characters
Fixes #7208
2023-08-05 02:59:58 +01:00
Nick Craig-Wood
db2a49e384 Add Raymond Berger to contributors 2023-08-05 02:59:58 +01:00
Kaloyan Raev
d63fcc6e44 storj: performance improvement for large file uploads
storj.io/uplink v1.11.0 comes with improved logic for uploading large
files where file segments are uploaded concurrently instead of serially.
This allows rclone to fully utilize the network connection during the
entire upload process.

This change enables the new upload logic.
2023-08-04 17:40:03 +09:00
kapitainsky
4444037f5c docs: box client_id creation
See: https://forum.rclone.org/t/box-getting-403-errors-when-using-chunker-working-fine-without-it/40388/22
2023-08-03 14:51:49 +01:00
Raymond Berger
a555513c26 docs: add missing comma to overview webdav footnote 2023-08-03 14:50:04 +01:00
Nick Craig-Wood
039c260216 build: update to go1.21rc4 2023-08-03 13:53:43 +01:00
Nick Craig-Wood
4577c08e05 Add Julian Lepinski to contributors 2023-08-03 13:53:43 +01:00
Nick Craig-Wood
c9ed691919 docs: add minio as a sponsor 2023-08-03 08:39:15 +01:00
Julian Lepinski
9f96c0d4ea swift: fix HEADing 0-length objects when --swift-no-large-objects set
The Swift backend does not always respect the flag telling it to skip
HEADing zero-length objects. This commit fixes that for ls/lsl/lsf.

Swift returns zero length for dynamic large object files when they're
included in a files lookup, which means that determining their size
requires HEADing each file that returns a size of zero. rclone's
--swift-no-large-objects instructs rclone that no large objects are
present and accordingly rclone should not HEAD files that return zero
length.

When rclone is performing an ls / lsf / lsl type lookup, however, it
continues to HEAD any zero length objects it encounters, even with
this flag set. Accordingly, this change causes rclone to respect the
flag in these situations.

NB: It is worth noting that this will cause rclone to incorrectly
report zero length for any dynamic large objects encountered with the
--swift-no-large-objects flag set.
2023-08-03 08:38:39 +01:00
Nick Craig-Wood
91d095f468 docs: update command docs to new style 2023-08-02 12:53:09 +01:00
Nick Craig-Wood
bff702a6f1 docs: group the global flags and make them appear on command and flags pages
This adds an additional parameter to the creation of each flag. This
specifies one or more flag groups. This **must** be set for global
flags and **must not** be set for local flags.

This causes flags.md to be built with sections to aid comprehension
and it causes the documentation pages for each command (and the
`--help`) to be built showing the flags groups as specified in the
`groups` annotation on the command.

See: https://forum.rclone.org/t/make-docs-for-mortals-not-only-rclone-gurus/39476/
2023-08-02 12:53:09 +01:00
Nick Craig-Wood
a1d6bbd31f Add rclone completion powershell - basic implementation only 2023-08-02 12:53:09 +01:00
Nick Craig-Wood
fb6a9dfbf3 docs: fix rclone config edit docs 2023-08-02 12:53:09 +01:00
Nick Craig-Wood
3f3c5f3ff4 build: remove unused package cmd/serve/http/data
This was superseded by lib/http/template.go
2023-08-02 12:53:09 +01:00
Nick Craig-Wood
89196cb353 Add nielash to contributors 2023-08-02 12:53:09 +01:00
Nick Craig-Wood
9284506b86 Add Zach to contributors 2023-08-02 12:53:09 +01:00
yuudi
88c72d1f4d http: fix webdav OPTIONS response (#6433) 2023-08-01 11:48:41 +09:00
Paul
5e3bf50b2e webdav: nextcloud: fix segment violation in low-level retry
Fix https://github.com/rclone/rclone/issues/7168

Co-authored-by: ncw <nick@craig-wood.com>
Co-authored-by: Paul <devnoname120@gmail.com>
2023-08-01 11:15:33 +09:00
nielash
982f76b4df sftp: support dynamic --sftp-path-override
Before this change, rclone always expected --sftp-path-override to be
the absolute SSH path to remote:path/subpath which effectively made it
unusable for wrapped remotes (for example, when used with a crypt
remote, the user would need to provide the full decrypted path.)

After this change, the old behavior remains the default, but dynamic
paths are now also supported, if the user adds '@' as the first
character of --sftp-path-override. Rclone will ignore the '@' and
treat the rest of the string as the path to the SFTP remote's root.
Rclone will then add any relative subpaths automatically (including
unwrapping/decrypting remotes as necessary).

In other words, the path_override config parameter can now be used to
specify the difference between the SSH and SFTP paths. Once specified
in the config, it is no longer necessary to re-specify for each
command.

See: https://forum.rclone.org/t/sftp-path-override-breaks-on-wrapped-remotes/40025
2023-07-30 03:12:07 +01:00
Zach
347812d1d3 ftp,sftp: add socks_proxy support for SOCKS5 proxies
Fixes #3558
2023-07-30 03:02:08 +01:00
yuudi
f4449440f8 http: CORS should not be send if not set (#6433) 2023-07-29 16:12:31 +09:00
kapitainsky
e66675d346 docs: rclone backend restore 2023-07-29 11:31:16 +09:00
Nick Craig-Wood
45228e2f18 build: update dependencies
This does not update bazil/fuse because it does not build on freebsd

https://github.com/bazil/fuse/issues/295

This partially updates the prometheus library as the latest no longer compiles with plan9

https://github.com/prometheus/procfs/issues/554
2023-07-29 01:57:23 +01:00
Nick Craig-Wood
b866850fdd Add yuudi to contributors 2023-07-29 01:57:23 +01:00
yuudi
5b63b9534f rc: add execute-id for job-id 2023-07-28 18:35:14 +09:00
Nick Craig-Wood
10449c86a4 sftp: add --sftp-ssh to specify an external ssh binary to use
This allows using an external ssh binary instead of the built-in ssh
library for making SFTP connections.

This makes another integration test target, TestSFTPRcloneSSH.

Fixes #7012
2023-07-28 10:29:02 +01:00
Nick Craig-Wood
26a9a9fed2 Add Niklas Hambüchen to contributors 2023-07-28 10:29:02 +01:00
Chun-Hung Tseng
602e42d334 protondrive: fix a bug in parsing User metadata (#7174) 2023-07-28 11:03:23 +02:00
Niklas Hambüchen
4c5a21703e docs: dropbox: Explain that Teams needs "Full Dropbox" 2023-07-28 17:52:29 +09:00
Nick Craig-Wood
f2ee949eff fichier: implement DirMove
See: https://forum.rclone.org/t/1fichier-rclone-does-not-allow-to-rename-files-and-folders-when-you-mount-a-1fichier-disk-drive/24726/
2023-07-28 01:25:42 +01:00
kapitainsky
3ad255172c docs: b2 versions names caveat 2023-07-28 09:23:34 +09:00
Nick Craig-Wood
29b1751d0e serve webdav: fix error: Expecting fs.Object or fs.Directory, got <nil>
Before this change rclone serve webdav would sometimes give this error

    Expecting fs.Object or fs.Directory, got <nil>

It turns out that when a file is being updated it doesn't have a
DirEntry and is allowed to be <nil>, so in this case we create the
MIME type from the extension.

See: https://forum.rclone.org/t/webdav-union-of-onedrive-expecting-fs-object-or-fs-directory-got-nil/40298
2023-07-28 00:54:45 +01:00
kapitainsky
363da9aa82 docs: s3 versions names caveat 2023-07-27 12:36:50 +09:00
yuudi
6c8148ef39 http servers: allow CORS to be set with --allow-origin flag - fixes #5078
Some changes to the test cases:
Because MiddlewareCORS returns early on an OPTIONS request,
this middleware should only be used once, at the NewServer function.
Test cases should pass the AllowOrigin config instead of adding
this middleware again.

A new test case was added to test a CORS preflight request with
an authenticator. Preflight requests should always return 200 OK
regardless of authentication.

Co-authored-by: yuudi <yuudi@users.noreply.github.com>
2023-07-26 10:15:54 +01:00
Nick Craig-Wood
3ed4a2e963 sftp: stop uploads re-using the same ssh connection to improve performance
Before this change we released the ssh connection back to the pool
before the upload was finished.

This meant that uploads were re-using the same ssh connection which
reduced throughput.

This releases the ssh connection back to the pool only after the
upload has finished, or when it is in an error state.

See: https://forum.rclone.org/t/sftp-backend-opens-less-connection-than-expected/40245
2023-07-25 13:05:37 +01:00
Anagh Kumar Baranwal
aaadb48d48 vfs: keep virtual directory status accurate and reduce deadlock potential
This changes hasVirtual to an atomic struct variable that's updated on
add or delete from the virtual map.

This keeps it up to date and avoids deadlocks.

Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-07-25 09:08:16 +01:00
Anagh Kumar Baranwal
52e25c43b9 vfs: Added cache cleaner for directories to reduce memory usage
This empties the directory cache after twice the directory cache
period to release memory.

Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-07-25 09:08:16 +01:00
Nick Craig-Wood
9a66563fc6 Add Edwin Mackenzie-Owen to contributors 2023-07-25 09:08:16 +01:00
Nick Craig-Wood
6ca670d66a Add Tiago Boeing to contributors 2023-07-25 09:08:16 +01:00
Nick Craig-Wood
809653055d Add gabriel-suela to contributors 2023-07-25 09:08:16 +01:00
Nick Craig-Wood
61325ce507 Add Ricardo D'O. Albanus to contributors 2023-07-25 09:08:16 +01:00
Edwin Mackenzie-Owen
c3989d1906 smb: implement multi-threaded writes for copies to smb
smb2.File implements the WriterAtCloser interface defined in
fs/types.go. Expose it via an OpenWriterAt method on
the fs struct to support multi-threaded writes.
2023-07-25 08:31:36 +01:00
Tiago Boeing
a79887171c docs: mega: update with solution when receiving killed on process 2023-07-25 04:21:37 +01:00
Chun-Hung Tseng
f29e284c90 protondrive: fix download signature verification bug (#7169) 2023-07-24 14:54:39 +02:00
Chun-Hung Tseng
9a66086fa0 protondrive: fix bug in digests parsing (#7164) 2023-07-24 09:00:18 +02:00
Chun-Hung Tseng
1845c261c6 protondrive: fix missing file sha1 and appstring issues (#7163) 2023-07-24 08:56:21 +02:00
Chun-Hung Tseng
70cbcef624 Add Chun-Hung Tseng to Maintainer (#7162) 2023-07-23 16:29:24 +02:00
gabriel-suela
9169b2b5ab cmd: fix log message typo 2023-07-23 08:43:03 +09:00
Ricardo D'O. Albanus
0957c8fb74 chunker: Update documentation to mention issue with small files
See: https://forum.rclone.org/t/chunker-not-deactivating-for-small-files-and-wasting-api-calls/40122
2023-07-23 00:40:50 +01:00
Anagh Kumar Baranwal
bb0cd76a5f fix: mount parsing for linux 2023-07-22 17:29:20 +05:30
Nick Craig-Wood
08240c8cf5 Add Chun-Hung Tseng to contributors 2023-07-22 10:54:21 +01:00
Chun-Hung Tseng
014acc902d protondrive: add protondrive backend - fixes #6072 2023-07-22 10:46:21 +01:00
Benjamin
33fec9c835 doc: Fix Leviia block 2023-07-18 19:58:19 +01:00
kapitainsky
3a5ffc7839 docs: mention Box as base32768 compatible
As many people suddenly move to Box (another "unlimited" cloud storage migration saga) there are frequent questions about which crypt file name encoding to use.

Box is base32768 friendly.

It has been tested with:

https://pub.rclone.org/base32768.zip

and:

rclone test info --check-length boxremote:

maxFileLength = 255 // for 1 byte unicode characters
maxFileLength = 255 // for 2 byte unicode characters
maxFileLength = 255 // for 3 byte unicode characters
maxFileLength = -1 // for 4 byte unicode characters
2023-07-18 19:55:54 +01:00
Benjamin
8a6bf35481 Add Leviia Object Storage on index.md 2023-07-18 09:52:05 +01:00
Benjamin
f7d27f4bf2 Add Object storage to Leviia on README.md 2023-07-18 09:52:05 +01:00
kapitainsky
378a2d21ee --max-duration - add new exit code (10)
This adds a dedicated exit code (10) for the --max-duration flag.

Rclone will exit with exit code 10 if the duration limit is reached.

It behaves in a similar fashion to --max-transfer and exit code 8.

discussed on the forum:

https://forum.rclone.org/t/max-duration-option-is-triggering-exit-with-error/39917/6
2023-07-18 09:51:31 +01:00
Nick Craig-Wood
3404eb0444 Changelog updates from Version v1.63.1 2023-07-17 15:15:16 +01:00
Nick Craig-Wood
13e5701f2a build: add new sponsors page to docs 2023-07-17 14:28:40 +01:00
Nick Craig-Wood
432d5d1e20 operations: fix overlapping check on case insensitive file systems
Before this change, the overlapping check could erroneously give this
error on case insensitive file systems:

    Failed to sync: destination and parameter to --backup-dir mustn't overlap

The code was fixed and re-worked to be simpler and more reliable.

See: https://forum.rclone.org/t/backup-dir-cannot-be-in-root-even-when-excluded/39844/
2023-07-17 14:00:04 +01:00
Nick Craig-Wood
cc05159518 Add Benjamin to contributors 2023-07-17 14:00:04 +01:00
Benjamin
119ccb2b95 s3: add Leviia S3 Object Storage as provider 2023-07-16 18:08:47 +01:00
Anagh Kumar Baranwal
0ef0e908ca build: update to go1.21rc3 and make go1.19 the minimum required version
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-07-16 10:09:25 +01:00
Nick Craig-Wood
0063d14dbb Add darix to contributors 2023-07-14 10:27:20 +01:00
albertony
0d34efb10f box: fix reconnect failing with HTTP 400 Bad Request
The error is:

  Error: failed to configure token with jwt authentication: jwtutil: failed making auth request: 400 Bad Request

With the following additional debug information:

  jwtutil: Response Body: {"error":"invalid_grant","error_description":"Please check the 'aud' claim. Should be a string"}

Problem is that in jwt-go the RegisteredClaims type has an Audience field (aud claim) that
is a list, while box apparently expects it to be a singular string. In jwt-go v4, which we
currently use, there is an alternative type StandardClaims which matches what box wants.
Unfortunately StandardClaims is marked as deprecated and is removed in the
newer v5 version, so this is a short-term fix only.

Fixes #7114
2023-07-14 10:24:33 +01:00
darix
415f4b2b93 webdav: nextcloud chunking: add more guidance for the user to check the config 2023-07-10 14:37:09 +01:00
Nick Craig-Wood
07cf5f1d25 operations: fix .rclonelink files not being converted back to symlinks
Before this change the new partial downloads code was causing symlinks
to be copied as regular files.

This was because the partial isn't named .rclonelink so the local
backend saves it as a normal file and renaming it to .rclonelink
doesn't cause it to become a symlink.

This fixes the problem by not copying .rclonelink files using the
partials mechanism but reverting to the previous --inplace behaviour.

This could potentially be fixed better in the future by changing the
local backend Move to change files to and from symlinks depending on
their name. However this was deemed too complicated for a point
release.

This also adds a test in the local backend. This test should ideally
be in operations but it isn't easy to put it there as operations knows
nothing of symlinks.

Fixes #7101
See: https://forum.rclone.org/t/reggression-in-v1-63-0-links-drops-the-rclonelink-extension/39483
2023-07-10 14:30:59 +01:00
Nick Craig-Wood
7d31956169 local: fix partial directory read for corrupted filesystem
Before this change if a directory entry could be listed but not
lstat-ed then rclone would give an error and abort the directory
listing with the error

    failed to read directory entry: failed to read directory "XXX": lstat XXX

This change makes sure that the directory listing carries on even
after this kind of error.

The sync will be marked as failed, but it will carry on.

This problem was caused by a programming error setting the err
variable in an outer scope when it should have been using a local err
variable.

See: https://forum.rclone.org/t/sync-aborts-if-even-one-single-unreadable-folder-is-encountered/39653
2023-07-09 17:58:03 +01:00
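The scoping bug described above is a common Go pitfall. A minimal sketch of the shape of the fix, with made-up names rather than the local backend's actual code: assign lstat errors to a local variable, remember the first one, and keep listing.

    package sketch

    import "fmt"

    // listBroken assigns to the err declared in the outer scope, so one
    // entry that cannot be lstat-ed aborts the whole listing.
    func listBroken(names []string, lstat func(string) error) error {
        var err error
        for _, name := range names {
            err = lstat(name)
            if err != nil {
                return fmt.Errorf("failed to read directory entry: %w", err)
            }
        }
        return err
    }

    // listFixed uses a local err, remembers the first failure so the sync
    // can still be marked as failed, and carries on listing.
    func listFixed(names []string, lstat func(string) error) (firstErr error) {
        for _, name := range names {
            if err := lstat(name); err != nil && firstErr == nil {
                firstErr = fmt.Errorf("failed to read directory entry: %w", err)
            }
        }
        return firstErr
    }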
Nick Craig-Wood
473d443874 smb: fix "Statfs failed: bucket or container name is needed" when mounting
Before this change, if you mounted the root of the smb backend, rclone
would give an error on startup and periodically in the mount logs:

    Statfs failed: bucket or container name is needed in remote

This fix makes the smb backend return empty usage in this case which
will stop the errors and show the default 1P of free space.

See: https://forum.rclone.org/t/error-statfs-failed-bucket-or-container-name-is-needed-in-remote/39631
2023-07-08 12:24:46 +01:00
Nick Craig-Wood
e294b76121 Add Vladislav Vorobev to contributors 2023-07-08 12:24:46 +01:00
Vladislav Vorobev
8f3c583870 docs: no need to disable 2FA for Mail.ru Cloud anymore
This sentence was written at the time when the backend used an access token; nowadays users need to generate and use an application password instead, see #6398.
2023-07-08 10:27:40 +02:00
Nick Craig-Wood
d0d41fe847 rclone config redacted: implement support mechanism for showing redacted config
This introduces a new fs.Option flag, Sensitive and uses this along
with IsPassword to redact the info in the config file for support
purposes.

It adds this flag into backends where appropriate. It was necessary to
add oauthutil.SharedOptions to some backends as they were missing
them.

Fixes #5209
2023-07-07 16:25:14 +01:00
Nick Craig-Wood
297f15a3e3 docs: update the number of providers supported 2023-07-07 16:25:14 +01:00
Nick Craig-Wood
d5f0affd4b Add Mahad to contributors 2023-07-07 16:25:14 +01:00
Nick Craig-Wood
0598aafbfd Add BakaWang to contributors 2023-07-07 16:25:14 +01:00
Mahad
528e22f139 docs: drive: Fix step 4 in "Making your own client_id" 2023-07-06 21:24:17 +01:00
BakaWang
f1a8420814 s3: add synology to s3 provider list 2023-07-06 10:54:07 +01:00
Nick Craig-Wood
e250f1afcd docs: remove old donate page 2023-07-06 10:13:42 +01:00
Nick Craig-Wood
ebf24c9872 docs: update contact page on website 2023-07-05 16:57:07 +01:00
Paul
b4c7b240d8 webdav: nextcloud: fix must use /dav/files/USER endpoint not /webdav error
Fix https://github.com/rclone/rclone/issues/7103

Before this change the RegExp validating the endpoint URL was a bit
too strict allowing only /dav/files/USER due to chunking limitations.

This patch adds back support for /dav/files/USER/dir/subdir etc.

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2023-07-05 16:56:01 +01:00
Nick Craig-Wood
22a14a8c98 operations: fix deadlock when using lsd/ls with --progress - Fixes #7102
The --progress flag overrides operations.SyncPrintf in order to do its
magic on stdout without interfering with other output.

Before this change the syncFprintf routine in operations (which is
used to print all output to stdout) was taking the
operations.StdoutMutex and the printProgress function in the
--progress routine was also attempting to take the same mutex causing
a deadlock.

This patch fixes the problem by moving the locking from the
syncFprintf function to SyncPrintf. It is then up to the function
overriding this to lock the StdoutMutex. This ensures the StdoutMutex
can never cause a deadlock.
2023-07-03 15:07:00 +01:00
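The deadlock shape being fixed here can be shown with a few lines of illustrative Go (these are not rclone's actual functions): sync.Mutex is not re-entrant, so a helper that locks before calling an override which locks again never returns. The fix is to take the mutex in exactly one place.

    package sketch

    import (
        "fmt"
        "sync"
    )

    var stdoutMu sync.Mutex

    // printFn stands in for the overridable printer (--progress swaps it out).
    var printFn = func(s string) {
        stdoutMu.Lock() // the override takes the mutex...
        defer stdoutMu.Unlock()
        fmt.Print(s)
    }

    // Broken shape: the helper also takes the mutex before calling the
    // override, so the Lock above blocks forever.
    func syncFprintfBroken(s string) {
        stdoutMu.Lock()
        defer stdoutMu.Unlock()
        printFn(s)
    }

    // Fixed shape: locking is left to whichever printer is installed, so
    // the mutex is only ever taken once per call.
    func syncFprintfFixed(s string) {
        printFn(s)
    }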
Nick Craig-Wood
07133b892d dirtree: fix performance with large directories of directories and --fast-list
Before this change, if using --fast-list on a directory with more than
a few thousand directories in it, DirTree.CheckParents became very slow,
taking up to 24 hours for a directory with 1,000,000 directories in it.

This is because it becomes an O(N²) operation as DirTree.Find has to
search each directory in a linear fashion as it is stored as a slice.

This patch fixes the problem by scanning the DirTree for directories
before starting the CheckParents process so it never has to call
DirTree.Find.

After the fix calling DirTree.CheckParents on a directory with
1,000,000 directories in it will take about 1 second.

Anything which calls DirTree.Find can potentially have bad performance
so in the future we should redesign the DirTree to use a different
underlying datastructure or have an index.

https://forum.rclone.org/t/almost-24-hours-cpu-compute-time-during-sync-between-two-large-s3-buckets/39375/
2023-07-03 14:09:21 +01:00
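A sketch of the indexing idea, with hypothetical types standing in for the real DirTree: one pass builds a set of known directories, so each parent check becomes an O(1) map lookup instead of a linear search per entry.

    package sketch

    import "path"

    type entry struct {
        remote string
        isDir  bool
    }

    // missingParents returns the parent directories that are referenced by
    // entries but not present in the listing themselves.
    func missingParents(entries []entry) map[string]bool {
        dirs := make(map[string]bool, len(entries))
        for _, e := range entries { // single pass to build the index
            if e.isDir {
                dirs[e.remote] = true
            }
        }
        missing := make(map[string]bool)
        for _, e := range entries {
            if parent := path.Dir(e.remote); parent != "." && !dirs[parent] {
                missing[parent] = true // parent needs to be synthesised
            }
        }
        return missing
    }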
Nick Craig-Wood
a8ca18165e Add Fjodor42 to contributors 2023-07-03 14:09:21 +01:00
Nick Craig-Wood
8c4e71fc84 Add Dean Attali to contributors 2023-07-03 14:09:21 +01:00
Nick Craig-Wood
351e2db2ef Add Sawada Tsunayoshi to contributors 2023-07-03 14:09:21 +01:00
Fjodor42
2234feb23d jottacloud: add Onlime provider 2023-07-02 11:16:07 +01:00
Anagh Kumar Baranwal
fb5125ecee build: fix macos builds for versions < 12
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-07-01 18:03:50 +01:00
Dean Attali
e8cbc54a06 docs: dropbox get client id, clarify you need to click a button 2023-07-01 17:50:40 +01:00
Nick Craig-Wood
00512e1303 Start v1.64.0-DEV development 2023-06-30 15:39:03 +01:00
Nick Craig-Wood
fcfbd3153b docs: website: replace google analytics with plausible analytics 2023-06-30 14:32:53 +01:00
Nick Craig-Wood
9a8075b682 docs: rename donate page to sponsor page and rework 2023-06-30 14:32:53 +01:00
Sawada Tsunayoshi
996037bee9 docs: fixed typo in exclude example in filtering docs (#7097)
The exclude flag instructions had "without" written as "with" which changes the whole meaning of how the exclude flag works.
2023-06-30 15:28:38 +02:00
Nick Craig-Wood
e90537b2e9 Version v1.63.0 2023-06-30 14:11:17 +01:00
Nick Craig-Wood
42c211c6b2 Revert sponsors back to organization 2023-06-30 10:10:05 +01:00
Nick Craig-Wood
3d4f127b33 Revert "union: disable PartialUploads on integration tests failures"
This reverts commit 9065e921c1.

It turns out the problem for the failing fs/sync tests was the
policies being different for search and create which meant that the
file was being created in one union branch but a diferent one was
found in another branch.
2023-06-29 21:11:04 +01:00
Misty
ff966b37af dropbox: fix result chans not taken care by defer fun 2023-06-28 19:49:38 +01:00
Nick Craig-Wood
3b6effa81a uptobox: fix rmdir declaring that directories weren't empty
The API seems to have changed and the `totalFileCount` item no longer
tracks the number of files in the directory so is useless for seeing
if the directory is empty.

This patch fixes the problem by seeing whether there are any files or
directories in the folder instead.

This problem was detected by the integration tests.
2023-06-28 17:27:43 +01:00
Nick Craig-Wood
8308d5d640 putio: fix server side copy failures (400 errors)
For some unknown reason the API sometimes returns the name already
exists on a server side copy.

    {
      "error_id": null,
      "error_message": "Name already exist",
      "error_type": "NAME_ALREADY_EXIST",
      "error_uri": "http://api.put.io/v2/docs",
      "extra": {},
      "status": "ERROR",
      "status_code": 400
    }

This patch uploads to a temporary name then renames it which works
around the problem.

This was spotted by the integration tests.
2023-06-28 16:45:35 +01:00
Nick Craig-Wood
14024936a8 putio: fix modification times not being preserved for server side copy and move
The integration tests spotted that modification times are no longer
being preserved by the putio API in server side move and copy.

This patch explicitly sets the modtime after the server side move or
copy.
2023-06-28 11:03:19 +01:00
Nick Craig-Wood
9065e921c1 union: disable PartialUploads on integration tests failures
In this commit we enabled PartialUploads for the union backend.

3faa84b47c combine,compress,crypt,hasher,union: support wrapping backends with PartialUploads

This turns out to cause test failures in fs/sync so this commit
disables them again pending further investigation.
2023-06-27 17:31:01 +01:00
Nick Craig-Wood
99788b605e sharefile: disable streamed transfers as they no longer work
At some point the sharefile API changed to require the size of the
file in the initial transaction which makes the streaming upload fail
with this error:

    upload failed: file size does not match (-2)

This was discovered by the integration tests.
2023-06-27 17:08:37 +01:00
Nick Craig-Wood
d4cc3760e6 putio: fix uploading to the wrong object on Update with overridden remote name
In this commit we discovered a problem with objects being uploaded to
the incorrect object name. It added an integration test for the
problem.

65b2e378e0 drive: fix incorrect remote after Update on object

This test was tripped by the putio backend and this patch fixes the
problem.
2023-06-27 16:02:33 +01:00
Nick Craig-Wood
a6acbd1844 uptobox: fix Update returning the wrong object
Before this patch the Update method had a 50/50 chance of returning
the old object rather than the new updated object.

This was discovered in the integration tests.

This patch fixes the problem by deleting the duplicate object before
we look for the new object.
2023-06-27 16:02:33 +01:00
Nick Craig-Wood
389565f5e2 storj: fix uploading to the wrong object on Update with overridden remote name
In this commit we discovered a problem with objects being uploaded to
the incorrect object name. It added an integration test for the
problem.

65b2e378e0 drive: fix incorrect remote after Update on object

This test was tripped by the Storj backend and this patch fixes the
problem.
2023-06-27 16:02:33 +01:00
Nick Craig-Wood
4b4198522d storj: fix "uplink: too many requests" errors when uploading to the same file
Storj has a rate limit of 1 per second when uploading to the same
file.

This was being tripped by the integration tests.

This patch fixes it by detecting the error and sleeping for 1 second
before retrying.

See: https://github.com/storj/uplink/issues/149
2023-06-27 16:02:33 +01:00
Nick Craig-Wood
f7665300c0 fstests: allow ObjectUpdate test to retry upload 2023-06-27 16:02:33 +01:00
Nick Craig-Wood
73beae147f webdav: Fix modtime on server side copy for owncloud and nextcloud
Before this change a server side copy did not preserve the modtime.

This used to work on nextcloud but at some point it started ignoring
the `X-Oc-Mtime` header.

This patch sets the modtime explicitly after a server side copy if the
`X-Oc-Mtime` wasn't accepted.

This problem was discovered in the integration tests.
2023-06-26 20:23:28 +01:00
Nick Craig-Wood
92f8e476b7 Add mac-15 to contributors 2023-06-26 20:23:28 +01:00
Nick Craig-Wood
5849148d51 Add zzq to contributors 2023-06-26 20:23:28 +01:00
Nick Craig-Wood
37853ec412 Add Peter Fern to contributors 2023-06-26 20:23:28 +01:00
Nick Craig-Wood
ae7ff28714 Add danielkrajnik to contributors 2023-06-26 20:23:28 +01:00
Nick Craig-Wood
9873f4bc74 Add Mariusz Suchodolski to contributors 2023-06-26 20:23:28 +01:00
Nick Craig-Wood
1b200bf69a Add Paulo Schreiner to contributors 2023-06-26 20:23:28 +01:00
Nick Craig-Wood
e3fa6fe3cc swift: fix code formatting 2023-06-26 20:23:28 +01:00
mac-15
9e1b3861e7 docs: add blomp cloud storage guide 2023-06-26 17:49:27 +01:00
zzq
e9a753f678 s3: add Qiniu KODO quirks virtualHostStyle is false 2023-06-26 17:47:27 +01:00
Dimitri Papadopoulos
708391a5bf backend: fix misspellings found by codespell 2023-06-26 14:34:52 +01:00
Peter Fern
1cfed18aa7 http: add client certificate user auth middleware
This populates the authenticated user from the client certificate
common name.

Also added tests for the existing client certificate functionality.
2023-06-26 14:33:53 +01:00
kapitainsky
7751d5a00b rc: config/listremotes include from env vars
Fixes: 
#6540

Discussed:
https://forum.rclone.org/t/environment-variable-config-not-used-for-remote-control/39014
2023-06-26 12:30:44 +01:00
danielkrajnik
8274712c2c docs: s3: fix example for restoring single objects
See: https://forum.rclone.org/t/cant-restore-files-from-aws-glacier-deep-only-directories/39258/3
2023-06-26 11:41:15 +01:00
Mariusz Suchodolski
625a564ba3 docs: faq: add solution for port opening issues on Windows 2023-06-25 11:20:54 +01:00
Ehsan Tadayon
2dd2072cdb s3: Fix Arvancloud Domain and region changes and alphabetise the provider 2023-06-25 11:01:41 +01:00
kapitainsky
998d1d1727 docs: listremotes also includes remotes from env vars 2023-06-24 15:46:23 +01:00
Paulo Schreiner
fcb912a664 fs: allow setting a write buffer for multithread
When multi-thread downloading is enabled, rclone used
to send a write to disk after every read, resulting in a lot
of small writes to different locations of the file.

Depending on the underlying filesystem or device, it can be more
efficient to send bigger writes.
2023-06-23 18:44:43 +01:00
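One way to picture the buffering, as a sketch only and not rclone's implementation: give each download stream an io.OffsetWriter for its region of the file and wrap it in a sized bufio.Writer, so many small reads coalesce into fewer, larger writes. (io.NewOffsetWriter needs go1.20 or later.)

    package sketch

    import (
        "bufio"
        "io"
        "os"
    )

    // copyChunk writes one downloaded stream into its region of the file
    // through a sized buffer, turning many small writes into fewer big ones.
    func copyChunk(dst *os.File, src io.Reader, offset int64, bufSize int) error {
        w := bufio.NewWriterSize(io.NewOffsetWriter(dst, offset), bufSize)
        if _, err := io.Copy(w, src); err != nil {
            return err
        }
        return w.Flush() // push out anything still sitting in the buffer
    }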
Nick Craig-Wood
5f938fb9ed s3: fix "Entry doesn't belong in directory" errors when using directory markers
Before this change we were incorrectly identifying the root directory
of the listing and adding it into the listing.

This caused higher layers of rclone to emit the error above.

See #7038
2023-06-23 18:01:11 +01:00
Nick Craig-Wood
72b79504ea azureblob: fix "Entry doesn't belong in directory" errors when using directory markers
Before this change we were incorrectly identifying the root directory
of the listing and adding it into the listing.

This caused higher layers of rclone to emit the error above.

See #7038
2023-06-23 18:01:11 +01:00
Nick Craig-Wood
3e2a606adb gcs: fix "Entry doesn't belong in directory" errors when using directory markers
Before this change we were incorrectly identifying the root directory
of the listing and adding it into the listing.

This caused higher layers of rclone to emit the error above.

Fixes #7038
2023-06-23 18:01:11 +01:00
Nick Craig-Wood
95a6e3e338 Add Stanislav Gromov to contributors 2023-06-23 18:01:11 +01:00
Anagh Kumar Baranwal
d06bb55f3f mount: Added _netdev to the example mount so it gets treated as a remote-fs rather than local-fs
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-06-23 17:37:00 +01:00
Stanislav Gromov
9f3694cea3 docs: drive: fix typo 2023-06-23 14:40:47 +01:00
Nick Craig-Wood
2c50f26c36 mount: fix mount failure on macOS with on the fly remote
This commit

3567a47258 fs: make ConfigString properly reverse suffixed file systems

made fs.ConfigString() return the full config of the backend. Because
mount was using this to make a volume name, it started to make volume
names with illegal characters in them which couldn't be mounted by macOS.

This fixes the problem by making a separate fs.ConfigStringFull() and
using that where appropriate and leaving the original
fs.ConfigString() function untouched.

Fixes #7063
See: https://forum.rclone.org/t/1-63-beta-fails-to-mount-on-macos-with-on-the-fly-crypt-remote/39090
2023-06-23 14:12:03 +01:00
Nick Craig-Wood
22d6c8d30d Add URenko to contributors 2023-06-23 14:12:03 +01:00
Nick Craig-Wood
96fb75c5a7 Add Sam Lai to contributors 2023-06-23 14:12:03 +01:00
URenko
acd67edf9a docs: remove "After" in systemd mount example again 2023-06-22 18:03:04 +01:00
Sam Lai
b26db8e640 accounting: bwlimit signal handler should always start
The SIGUSR2 signal handler for bandwidth limits currently only starts
if rclone is started at a time when a bandwidth limit applies. This
means that if rclone starts _outside_ such a time, i.e. with no
bandwidth limits, then enters a time where bandwidth limits do apply,
it will not be possible to use SIGUSR2 to toggle it.

This fixes that by always starting the signal handler, but only
toggling the limiter if there is a bandwidth limit configured.
2023-06-22 17:59:24 +01:00
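A minimal sketch of the "always install the handler, decide later" pattern described above, assuming a Unix build (SIGUSR2 does not exist on Windows); the type and field names are illustrative, not rclone's accounting code.

    package sketch

    import (
        "os"
        "os/signal"
        "sync"
        "syscall"
    )

    type bwLimiter struct {
        mu         sync.Mutex
        configured bool // was any bandwidth limit configured at all?
        enabled    bool
    }

    // startSignalHandler is called unconditionally, so SIGUSR2 keeps
    // working even if rclone started outside a limited time window.
    func (l *bwLimiter) startSignalHandler() {
        ch := make(chan os.Signal, 1)
        signal.Notify(ch, syscall.SIGUSR2)
        go func() {
            for range ch {
                l.mu.Lock()
                if l.configured { // only toggle when a limit exists
                    l.enabled = !l.enabled
                }
                l.mu.Unlock()
            }
        }()
    }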
Nick Craig-Wood
da955e5d4f operations: remove partials when the copy fails
Before this change we were only removing partials when they were
corrupted rather than when the copy just failed.
2023-06-21 22:56:05 +01:00
Nick Craig-Wood
4f8dab8bce zoho: fix downloads with Range: header returning the wrong data
Zoho has started returning the results from Range: requests with a 200
response code rather than the technically correct 206 status code.

Before this change this triggered workaround code to deal with Zoho
not obeying Range: requests properly.

This fix checks the returned headers for a Content-Range: header and if
it exists assumes it is a valid reply to the Range: request despite
the status being 200.

This problem was spotted by the integration tests.
2023-06-14 17:43:26 +01:00
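The core of the workaround amounts to a check like the following sketch, using only net/http types (the function name is made up):

    package sketch

    import "net/http"

    // looksLikeRangeReply accepts the usual 206, but also a 200 that
    // carries a Content-Range header, as a valid answer to a Range: request.
    func looksLikeRangeReply(resp *http.Response) bool {
        if resp.StatusCode == http.StatusPartialContent {
            return true
        }
        return resp.StatusCode == http.StatusOK &&
            resp.Header.Get("Content-Range") != ""
    }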
Nick Craig-Wood
000ddc4951 s3: fix versions tests when running on minio 2023-06-14 17:30:36 +01:00
Nick Craig-Wood
3faa84b47c combine,compress,crypt,hasher,union: support wrapping backends with PartialUploads
This means that, for example, wrapping a sftp backend with crypt will
upload to a temporary name and then rename unless disabled with
--inplace.

See: https://forum.rclone.org/t/backup-versioning/38978/7
2023-06-14 10:52:03 +01:00
kapitainsky
e1162ec440 docs: clarify --server-side-across-configs 2023-06-13 17:58:27 +01:00
Nick Craig-Wood
30cccc7101 cache: fix backends shutting down when in use when used via the rc
Before this fix, if a long running task (eg a copy) was started by the
rc then the backend could expire before the copy had finished.

The typical symptom was with the dropbox backend giving "batcher is
shutting down" errors.

This patch fixes the problem by pinning the backend until the job has
finished.

See: https://forum.rclone.org/t/uploads-start-repeatedly-failing-after-a-while-using-rc-sync-copy-vs-rclone-copy-for-dropbox/38873/
2023-06-13 15:48:20 +01:00
Nick Craig-Wood
1f5a29209e rc: add Job to ctx so it can be used elsewhere
See: https://forum.rclone.org/t/uploads-start-repeatedly-failing-after-a-while-using-rc-sync-copy-vs-rclone-copy-for-dropbox/38873/
2023-06-13 15:48:20 +01:00
Nick Craig-Wood
45255bccb3 accounting: fix Prometheus metrics to be the same as core/stats
In 04aa6969a4 we updated the displayed speed to be a rolling
average in core/stats and the progress output but we didn't update the
Prometheus metrics.

This patch updates the Prometheus metrics too.

Fixes #7053
2023-06-12 17:42:29 +01:00
Nick Craig-Wood
055206c4ee yandex: fix 400 Bad Request on transfer failure
Before this fix, if the upload failed for some reason the yandex
backend would attempt to retry the upload itself, which would fail
immediately with 400 Bad Request.

Normally we retry uploads at a higher level so they can be done with
new data and this patch does that.

See #7044
2023-06-11 11:11:43 +01:00
Nick Craig-Wood
f3070b82bc Add douchen to contributors 2023-06-11 11:11:43 +01:00
douchen
7e2deffc62 filter: fix deadlock with errors on --files-from
Before this change, when doing a recursive directory listing with
`--files-from`, if more than `--checkers` files errored (other than
file not found) then rclone would deadlock.

This fixes the problem by exiting on the first error.
2023-06-10 15:53:08 +01:00
Nick Craig-Wood
ae3ff50580 dropbox: implement --dropbox-pacer-min-sleep flag
See: https://forum.rclone.org/t/combine-mount-options-query/38080
2023-06-10 14:57:26 +01:00
Nick Craig-Wood
6486ba6344 operations: remove partially uploaded files on exit when not using --inplace
Before this change partially uploaded files (when --inplace is not in
effect) would be left lying around in the file system if rclone was
killed in the middle of a transfer.

This adds an exit handler to remove the file and removes it when the
file is complete.
2023-06-10 14:55:05 +01:00
Nick Craig-Wood
7842000f8a backend: for command not found errors, hint to look in the underlying remote
See: https://forum.rclone.org/t/rclone-cleanup-no-way-to-delete-pending-uploads-newer-than-24-hours/38416/6
2023-06-10 14:44:01 +01:00
Nick Craig-Wood
1f9c962183 operations: reopen downloads on error when using check --download and cat
Before this change, some parts of operations called the Open method on
objects directly, and some called NewReOpen to make an object which
can re-open itself on errors.

This adds a new function operations.Open which should be called
instead of fs.Object.Open to open a reliable stream of data and
changes all call sites to use that.

This means `rclone check --download` and `rclone cat` will re-open
files on failures.

See: https://forum.rclone.org/t/does-rclone-support-retries-for-check-when-using-download-flag/38641
2023-06-10 14:42:29 +01:00
Nick Craig-Wood
279d9ecc56 operations: fix pcloud can't set modified time
Before this change we tested special errors for straight equality.

This works for all normal backends, but the union backend may return
wrapped errors which contain the special error types.

In particular if a pcloud backend was part of a union when attempting
to set modification times the fs.ErrorCantSetModTime return wasn't
understood because it was wrapped in a union.Error.

This fixes the problem by using errors.Is instead in all the
comparisons in operations.

See: https://forum.rclone.org/t/failed-to-set-modification-time-1-error-pcloud-cant-set-modified-time/38596
2023-06-10 14:39:41 +01:00
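The difference is easy to demonstrate with a toy sentinel error; this is illustrative only and does not use rclone's fs package.

    package sketch

    import (
        "errors"
        "fmt"
    )

    // errCantSetModTime stands in for the fs sentinel error.
    var errCantSetModTime = errors.New("can't set modified time")

    func compare() (plainEqual, isMatch bool) {
        // A union branch returns the sentinel wrapped in its own error
        // type; fmt.Errorf with %w models that here.
        wrapped := fmt.Errorf("pcloud branch: %w", errCantSetModTime)

        plainEqual = wrapped == errCantSetModTime       // false: it is wrapped
        isMatch = errors.Is(wrapped, errCantSetModTime) // true: Is unwraps
        return plainEqual, isMatch
    }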
Nick Craig-Wood
31773ecfbf union: allow errors to be unwrapped for inspection
Before this change the Errors type in the union backend produced
errors which could not be Unwrapped to test their type.

This adds the (go1.20) Unwrap method to the Errors type which allows
errors.Is to work on these errors.

It also adds unit tests for the Errors type and fixes a couple of
minor bugs thrown up in the process.

See: https://forum.rclone.org/t/failed-to-set-modification-time-1-error-pcloud-cant-set-modified-time/38596
2023-06-10 14:39:41 +01:00
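A sketch of the multi-error Unwrap pattern the commit describes; the Error method here is simplified and the real union type is more elaborate.

    package sketch

    import (
        "errors"
        "strings"
    )

    // Errors collects the errors returned by several union branches.
    type Errors []error

    func (e Errors) Error() string {
        msgs := make([]string, len(e))
        for i, err := range e {
            msgs[i] = err.Error()
        }
        return strings.Join(msgs, "; ")
    }

    // Unwrap exposes the individual errors so errors.Is and errors.As can
    // look inside (multi-error unwrapping needs go1.20 or later).
    func (e Errors) Unwrap() []error { return e }

    var errCantSetModTime = errors.New("can't set modified time")

    func matches() bool {
        var err error = Errors{errors.New("branch 1 failed"), errCantSetModTime}
        return errors.Is(err, errCantSetModTime) // true, thanks to Unwrap
    }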
kapitainsky
666e34cf69 s3: docs: old broken link updated 2023-06-09 18:15:54 +01:00
Nick Craig-Wood
5a84a08b3f build: fix build failure installing nfpm
Before this fix we used the bin/get-github-release.go script to
install nfpm.

However this script fails scraping the downloads page when the target
has more than a few download options. The alternative would be using
the GitHub API but this needs authentication so as not to be rate
limited on GitHub actions.

This patch switches over to go install which is less efficient but
should work in all circumstances.
2023-06-07 15:41:52 +01:00
Nick Craig-Wood
51a468b2ba genautocomplete: rename to completion with alias to old name
This brings it into line with cobra's naming scheme and stops cobra
writing another "completion" command which doesn't work as well and
which confuses users.

See: https://forum.rclone.org/t/rclone-genautocomplete-bash-vs-rclone-completion-bash-neither-works-fully/38431
2023-05-25 14:32:40 +01:00
Nick Craig-Wood
fc798d800c vfs: fix backends being Shutdown too early when startup takes a long time
Before this change, if the VFS took more than 5 seconds to initialise
(which can happen if there are a lot of files, or a lot of files which
need uploading) the backend was dropped out of the cache before the VFS
was fully created.

This was noticeable in the dropbox backend where the batcher was shut
down too soon and prevented further uploads.

This fixes the problem by Pinning backends before the VFS cache is
created.

https://forum.rclone.org/t/if-more-than-251-elements-in-the-que-to-upload-fails-with-batcher-is-shutting-down/38076/2
2023-05-18 16:16:12 +01:00
Nick Craig-Wood
3115ede1d8 Add kapitainsky to contributors 2023-05-18 16:16:12 +01:00
kapitainsky
7a5491ba7b docs: chunker: fix typo 2023-05-17 17:10:53 +01:00
Nick Craig-Wood
a6cf4989b6 local: fix crash with --metadata on Android
Before this change we called statx which causes a

    SIGSYS: bad system call

fault.

After this we force Android to use fstatat

Fixes #7006
2023-05-17 17:03:26 +01:00
Nick Craig-Wood
f489b54fa0 operations: ignore partial tests on backends which don't support them 2023-05-17 17:03:26 +01:00
Nick Craig-Wood
6244d1729b Add Tareq Sharafy to contributors 2023-05-17 17:03:19 +01:00
Nick Craig-Wood
e97c2a2832 Add cc to contributors 2023-05-17 17:03:19 +01:00
albertony
56bf9b4a10 Add albertony to maintainers 2023-05-17 15:31:07 +02:00
WeidiDeng
ceb9406c2f serve webdav: implement owncloud checksum and modtime extensions
* implement owncloud checksum and modtime extensions for webdav server
* test rclone webdav server as owncloud webdav
2023-05-15 15:38:00 +01:00
Tareq Sharafy
1f887f7ba0 azblob: doc
Signed-off-by: Tareq Sharafy <tareq.sha@gmail.com>
2023-05-14 12:12:24 +01:00
Tareq Sharafy
7db26b6b34 azblob: support azure workload identities 2023-05-14 12:12:24 +01:00
cc
37a3309438 s3: v3sign: add missing subresource delete
The delete query string parameter must be included when you create the
CanonicalizedResource for a multi-object Delete request.
2023-05-14 11:25:52 +01:00
Nick Craig-Wood
97be9015a4 union: implement missing methods
Implement these missing methods:

- CleanUp

And declare these ones unimplementable:

- UnWrap
- WrapFs
- SetWrapper
- UserInfo
- Disconnect
- PublicLink
- PutUnchecked
- MergeDirs
- OpenWriterAt
2023-05-14 11:22:57 +01:00
Nick Craig-Wood
487e4f09b3 combine: implement missing methods
Implement these missing methods:

- PublicLink
- PutUnchecked
- MergeDirs
- CleanUp
- OpenWriterAt

And declare these ones unimplementable:

- UnWrap
- WrapFs
- SetWrapper
- UserInfo
- Disconnect

Fixes #6999
2023-05-14 11:22:57 +01:00
Nick Craig-Wood
09a408664d fs: create Overlay feature flag to indicate backend wraps others
Set this automatically for any backend which implements UnWrap and
manually for combine and union which can't implement UnWrap but do
overlay other backends.
2023-05-14 11:22:57 +01:00
Nick Craig-Wood
43fa256d56 fs: add OverrideDirectory for overriding path of directory 2023-05-14 11:22:57 +01:00
wiserain
6859c04772 pikpak: add validity check when using a media link
Before this change, the Pikpak backend would always download
the first media item whenever possible, regardless of whether
or not it was the original contents.

Now we check the validity of a media link using the `fid`
parameter in the link URL.

Fixes #6992
2023-05-13 03:41:59 +09:00
dependabot[bot]
38a0539096 build(deps): bump github.com/cloudflare/circl from 1.1.0 to 1.3.3
Bumps [github.com/cloudflare/circl](https://github.com/cloudflare/circl) from 1.1.0 to 1.3.3.
- [Release notes](https://github.com/cloudflare/circl/releases)
- [Commits](https://github.com/cloudflare/circl/compare/v1.1.0...v1.3.3)

---
updated-dependencies:
- dependency-name: github.com/cloudflare/circl
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-05-12 14:56:45 +01:00
Nick Craig-Wood
2cd85813b4 sftp: don't check remote points to a file if it ends with /
This avoids calling stat on the root directory, which saves a call and
avoids an operation that some servers don't like.

See: https://forum.rclone.org/t/stat-failed-error-on-sftp/38045
2023-05-11 07:58:20 +01:00
Nick Craig-Wood
e6e6069ecf sftp: don't stat directories before listing them
Before this change we ran stat on the directory to see if it existed.

Not only is this inefficient it isn't allowed by some SFTP servers.

See: https://forum.rclone.org/t/stat-failed-error-on-sftp/38045
2023-05-10 15:07:21 +01:00
Nick Craig-Wood
fcf47a8393 pikpak: set the NoMultiThreading feature flag to disable multi-thread copy
Before this change the pikpak backend changed the global
--multi-thread-streams flag which wasn't desirable.

Now the machinery is in place to use the NoMultiThreading feature flag
instead.

Fixes #6915
2023-05-09 17:46:19 +01:00
Nick Craig-Wood
46a323ae14 operations: Don't use multi-thread copy if the backend doesn't support it #6915 2023-05-09 17:40:58 +01:00
Nick Craig-Wood
72be80ddca fs: add new backend feature NoMultiThreading
This should be set for backends which can't support simultaneous reads
from different offsets in a single file.
2023-05-09 17:40:11 +01:00
Nick Craig-Wood
a9e7e7bcc2 ftp: Fix "501 Not a valid pathname." errors when creating directories
Some servers return a 501 error when using MLST on a non-existing
directory. This patch allows it.

I don't think this is correct usage according to the RFC, but the RFC
doesn't explicitly state which error code should be returned for
file/directory not found.
2023-05-09 17:27:35 +01:00
Nick Craig-Wood
925c4382e2 ftp: fix "unsupported LIST line" errors on startup
Before this fix a blank line in the MLST output from the FTP server
would cause the "unsupported LIST line" error.

This fixes the problem in the upstream fork.

Fixes #6879
2023-05-09 17:27:35 +01:00
Nick Craig-Wood
08c60c3091 Add Janne Hellsten to contributors 2023-05-09 17:27:35 +01:00
Janne Hellsten
5c594fea90 operations: implement uploads to temp name with --inplace to disable
When copying to a backend which has the PartialUploads feature flag
set and can Move files the file is copied into a temporary name first.
Once the copy is complete, the file is renamed to the real
destination.

This prevents other processes from seeing partial copies of files
while they are being transferred and prevents overwriting the old file
until the new one is complete.

This also adds --inplace flag that can be used to disable the partial
file copy/rename feature.

See #3770

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2023-05-09 16:28:10 +01:00
Janne Hellsten
cc01223535 fs: Implement PartialUploads feature flag
Implement a PartialUploads feature flag to mark backends for which
uploads are not atomic.

This is set for the following backends

- local
- ftp
- sftp

See #3770
2023-05-09 16:28:10 +01:00
Nick Craig-Wood
aaacfa51a0 sftp: fix move to allow overwriting existing files
Before this change rclone used a normal SFTP rename if present to
implement Move.

However the normal SFTP rename won't overwrite existing files.

This fixes it to either use the POSIX rename extension
("posix-rename@openssh.com") or to delete the source first before
renaming using the normal SFTP rename.

This isn't normally a problem as rclone always removes any existing
objects first, however to implement non --inplace operations we do
require overwriting an existing file.
2023-05-09 16:28:10 +01:00
Nick Craig-Wood
c18c66f167 fs: when creating new fs.OverrideRemotes don't layer overrides if not needed 2023-05-09 16:28:10 +01:00
Nick Craig-Wood
d6667d34e7 fs: fix String() method on fs.OverrideRemote
Before this fix it was returning the base objects string rather than
the overridden remote.
2023-05-09 16:28:10 +01:00
Nick Craig-Wood
e649cf4d50 uptobox: add --uptobox-private flag to make all uploaded files private
See: #6946
2023-05-08 17:50:50 +01:00
Nick Craig-Wood
f080ec437c azureblob: empty directory markers #3453 2023-05-07 12:47:09 +01:00
Nick Craig-Wood
4023eaebe0 gcs: fix directory marker code #3453
Use Update to upload the directory markers
2023-05-07 12:47:09 +01:00
Nick Craig-Wood
baf16a65f0 s3: fix directory marker code #3453
Use Update to upload the directory markers
2023-05-07 12:47:09 +01:00
Nick Craig-Wood
70fe2ac852 azureblob: fix azure blob uploads with multiple bits of metadata 2023-05-07 12:47:09 +01:00
Nick Craig-Wood
41cf7faea4 Add Andrei Smirnov to contributors 2023-05-07 12:47:09 +01:00
Andrei Smirnov
f226f2dfb1 s3: add petabox.io to s3 providers 2023-05-05 09:44:25 +01:00
Nick Craig-Wood
31caa019fa rc: fix output of Time values in options/get
Before this change these were output as `{}`; after this change they
are output as time strings `"2022-03-26T17:48:19Z"` in standard
JavaScript format.
2023-05-04 15:04:11 +01:00
Nick Craig-Wood
0468375054 uptobox: ensure files and folders show the modtime configured by --default-time #6986 2023-05-04 15:03:11 +01:00
Nick Craig-Wood
6001f05a12 union: the root folder shows the modtime configured by --default-time #6986 2023-05-04 15:03:11 +01:00
Nick Craig-Wood
f7b87a8049 koofr: ensure folders show the modtime configured by --default-time #6986 2023-05-04 15:03:11 +01:00
Nick Craig-Wood
d379641021 http: ensure folders show the modtime configured by --default-time #6986 2023-05-04 15:03:11 +01:00
Nick Craig-Wood
84281c9089 dropbox: ensure folders show the modtime configured by --default-time #6986 2023-05-04 15:03:11 +01:00
Nick Craig-Wood
8e2dc069d2 fs: Add --default-time flag to control unknown modtime of files/dirs
Before this patch, files or directories with unknown modtime would
appear as the current date.

When mounted some systems look at modification dates of directories to
see if they change and having them change whenever they drop out of
the directory cache is not optimal.

See #6986
2023-05-04 15:03:11 +01:00
Nick Craig-Wood
61d6f538b3 onedrive: add --onedrive-av-override flag to download files flagged as virus
This also produces a warning when rclone detects files have been
blocked because of virus content

    server reports this file is infected with a virus - use --onedrive-av-override to download anyway

Fixes #557
2023-05-03 15:21:30 +01:00
Nick Craig-Wood
65b2e378e0 drive: fix incorrect remote after Update on object
Before this change, when Object.Update was called in the drive
backend, it overwrote the remote with that of the object info.

This is incorrect - the remote doesn't change on Update and this patch
fixes that and introduces a new test to make sure it is correct for
all backends.

This was noticed when doing Update of objects in a nested combine
backend.

See: https://forum.rclone.org/t/rclone-runtime-goroutine-stack-exceeds-1000000000-byte-limit/37912
2023-05-03 13:51:27 +01:00
Nick Craig-Wood
dea6bdf3df combine: fix goroutine stack overflow on bad object
If the Remote() call failed to do its path adjustment, then it would
recursively call Remote() as part of logging the failure and cause a
stack overflow.

This fixes it by logging the underlying object instead.

See: https://forum.rclone.org/t/rclone-runtime-goroutine-stack-exceeds-1000000000-byte-limit/37912
2023-05-03 13:51:27 +01:00
Nick Craig-Wood
27eb8c7f45 config: stop config create making invalid config files
If config create was passed a parameter with an embedded \n it wrote
it straight to the config file which made it invalid and caused a
fatal error reloading it.

This stops keys and values with \r and \n being added to the config
file.

See: https://forum.rclone.org/t/how-to-control-bad-remote-creation-which-takes-rclone-down/37856
2023-05-03 11:40:30 +01:00
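The validation amounts to something like the following sketch; the helper name is hypothetical, not rclone's config code.

    package sketch

    import (
        "errors"
        "strings"
    )

    // checkConfigValue rejects keys or values that would corrupt the
    // line-oriented config file.
    func checkConfigValue(s string) error {
        if strings.ContainsAny(s, "\r\n") {
            return errors.New("config names and values may not contain \\r or \\n")
        }
        return nil
    }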
Nick Craig-Wood
1607344613 Add Adam K to contributors 2023-05-03 11:40:30 +01:00
Adam K
5f138dd822 dropbox: syncing documentation with source for dropbox default batch_timeout - fixes #6984 2023-05-02 17:04:32 +01:00
Anagh Kumar Baranwal
2520c05c4b mount2: disable xattrs
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-04-30 17:56:47 +01:00
Anagh Kumar Baranwal
f7f5e87632 mount2: fixed statfs
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-04-30 17:56:47 +01:00
Anagh Kumar Baranwal
a7e6806f26 mount2: updated go-fuse version
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-04-30 17:56:47 +01:00
Anagh Kumar Baranwal
d0eb884262 mount: removed unnecessary byte slice allocation for reads
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-04-30 17:54:30 +01:00
WeidiDeng
ae6874170f webdav: set modtime using propset for owncloud and nextcloud 2023-04-28 17:38:49 +01:00
Nick Craig-Wood
f5bab284c3 s3: fix missing "tier" metadata
Before this change if the storage class wasn't set on the object, we
didn't set the "tier" metadata.

This made it impossible to filter on tier using the metadata filters.

This returns the "tier" metadata as STANDARD if the storage class
isn't set on the object.

See: https://forum.rclone.org/t/copy-from-s3-to-another-s3-filter-by-storage-class/37861
2023-04-28 14:33:01 +01:00
Nick Craig-Wood
c75dfa6436 Add Jānis Bebrītis to contributors 2023-04-28 14:33:01 +01:00
Nick Craig-Wood
56eb82bdfc Add Tobias Gion to contributors 2023-04-28 14:33:01 +01:00
Nick Craig-Wood
066e00b470 gcs: empty directory markers #3453
- Report correct feature flag
- Fix test failures due to that
- don't output the root directory marker
- Don't create the directory marker if it is the bucket or root
- Create directories when uploading files
2023-04-28 14:31:05 +01:00
Jānis Bebrītis
e0c445d36e gcs: empty directory markers - #3453 2023-04-28 14:31:05 +01:00
Nick Craig-Wood
74652bf318 s3: empty directory markers further work #3453
- Report correct feature flag
- Fix test failures due to that
- don't output the root directory marker
- Don't create the directory marker if it is the bucket or root
- Create directories when uploading files
2023-04-28 14:31:05 +01:00
Jānis Bebrītis
b6a95c70e9 s3: empty directory markers - #3453 2023-04-28 14:31:05 +01:00
Nick Craig-Wood
aca7d0fd22 s3: fix potential crash in integration tests 2023-04-28 14:31:05 +01:00
Nick Craig-Wood
12761b3058 fstests: make integration tests work with connection strings in remotes 2023-04-28 14:31:05 +01:00
Nick Craig-Wood
3567a47258 fs: make ConfigString properly reverse suffixed file systems
Before this change we renamed file systems with overridden config with
{suffix}.

However this meant that ConfigString produced a value which wouldn't
re-create the file system.

This uses an internal hash to keep note of which config goes with
which {suffix} in order to remake the config properly.
2023-04-28 14:31:05 +01:00
Nick Craig-Wood
6b670bd439 mockfs: make it so it can be registered as an Fs 2023-04-28 14:31:05 +01:00
Nick Craig-Wood
335ca6d572 lsjson: make --stat more efficient
Don't look for a file if the remote ends with /

This also makes it less likely to find a directory marker in bucket
based file systems.
2023-04-28 14:31:05 +01:00
Tobias Gion
c4a9e480c9 ftp: lower log message priority when SetModTime is not supported to debug
See: https://forum.rclone.org/t/ftp-fritz-box-setmodtime-is-not-supported/37781
2023-04-25 16:31:42 +02:00
Nick Craig-Wood
232d304c13 drive: fix trailing slash mis-identification of folder as file
Before this change, drive would mistakenly identify a folder with a
trailing slash as a file when passed to NewObject.

This was picked up by the integration tests
2023-04-25 12:10:15 +01:00
Nick Craig-Wood
44ac79e357 Add dlitster to contributors 2023-04-25 12:10:15 +01:00
dlitster
0487e465ee docs: s3: clarify that X-Amz-Meta-Md5chksum is really a base64-encoded hex 2023-04-25 11:39:36 +01:00
Nick Craig-Wood
bb6cfe109d crypt: fix reading 0 length files
In an earlier patch

d5afcf9e34 crypt: try not to return "unexpected EOF" error

This introduced a bug for 0 length files, which this commit fixes. The
bug only manifests if the io.Reader returns data together with EOF,
which not all readers do.

This was failing in the integration tests.
2023-04-24 16:54:40 +01:00
WeidiDeng
864eb89a67 webdav: fix server side copy/move not overwriting - fixes #6964 2023-04-24 14:35:42 +01:00
Nick Craig-Wood
4471e6f258 selfupdate: obey --no-check-certificate flag
This patch makes sure we use our own HTTP transport when fetching the
current rclone version.

This allows it to use --no-check-certificate (and any other features
of our own transport).

See: https://forum.rclone.org/t/rclone-selfupdate-no-check-certificate-flag-not-work/37501
2023-04-24 12:26:01 +01:00
Nick Craig-Wood
e82db0b7d5 vfs: fix potential data race - Fixes #6962
This fixes a data race that was found by static analysis.
2023-04-24 12:17:03 +01:00
Nick Craig-Wood
72e624c5e4 serve dlna: fix potential data race #6962
This fixes a data race that was found by static analysis.
2023-04-24 12:17:03 +01:00
Nick Craig-Wood
6092fa57c3 Add Loren Gordon to contributors 2023-04-24 12:17:03 +01:00
Loren Gordon
3e15a594b7 cat: adds --separator option to cat command
When using `rclone cat` to print the contents of several files, the
user may want to inject some separator between the files, such as a
comma or a newline. This patch adds a `--separator` option to the `cat`
command to make that possible. The default value remains an empty
string, `""`, maintaining the prior behavior of `rclone cat`.

Closes #6968
2023-04-24 12:01:53 +01:00
Nick Craig-Wood
db8c007983 swift: ignore 404 error when deleting an object
See: https://forum.rclone.org/t/rclone-should-optionally-ignore-404-for-delete/37592
2023-04-22 10:49:10 +01:00
dependabot[bot]
5836da14c2 build(deps): bump github.com/aws/aws-sdk-go from 1.44.236 to 1.44.246
Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.236 to 1.44.246.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.236...v1.44.246)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-20 18:03:27 +01:00
dependabot[bot]
8ed07d11a0 build(deps): bump github.com/klauspost/compress from 1.16.3 to 1.16.5
Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.16.3 to 1.16.5.
- [Release notes](https://github.com/klauspost/compress/releases)
- [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml)
- [Commits](https://github.com/klauspost/compress/compare/v1.16.3...v1.16.5)

---
updated-dependencies:
- dependency-name: github.com/klauspost/compress
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-20 18:03:18 +01:00
dependabot[bot]
1f2ee44c20 build(deps): bump golang.org/x/term from 0.6.0 to 0.7.0
Bumps [golang.org/x/term](https://github.com/golang/term) from 0.6.0 to 0.7.0.
- [Release notes](https://github.com/golang/term/releases)
- [Commits](https://github.com/golang/term/compare/v0.6.0...v0.7.0)

---
updated-dependencies:
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-20 18:02:43 +01:00
Nick Craig-Wood
32798dca25 build: remove Go updates from dependabot as it is too noisy 2023-04-20 17:58:10 +01:00
Nick Craig-Wood
075f98551f Add jladbrook to contributors 2023-04-20 17:58:10 +01:00
Nick Craig-Wood
963ab220f6 Add Brian Starkey to contributors 2023-04-20 17:58:10 +01:00
jladbrook
281a007b1a crypt: add suffix option to set a custom suffix for encrypted files - fixes #6392 2023-04-20 17:28:13 +01:00
Brian Starkey
589b7b4873 s3: update Scaleway storage classes
There are now 3 classes:
 * "STANDARD" - Multi-AZ, all regions
 * "ONEZONE_IA" - Single-AZ, FR-PAR only
 * "GLACIER" - Archive, FR-PAR and NL-AMS only
2023-04-19 17:20:30 +01:00
Nick Craig-Wood
04d2781fda fichier: add cdn option to use CDN for download - Fixes #6943 2023-04-18 17:35:21 +01:00
Nick Craig-Wood
5b95fd9588 Add WeidiDeng to contributors 2023-04-18 17:35:21 +01:00
Nick Craig-Wood
a42643101e Add Damo to contributors 2023-04-18 17:35:21 +01:00
Nick Craig-Wood
bcca67efd5 Add Rintze Zelle to contributors 2023-04-18 17:35:21 +01:00
WeidiDeng
7771aaacf6 vfs: fix writing to a read only directory creating spurious directory entries
Before this fix, when a write to a read only directory failed, rclone
would leave spurious directory entries in the directory.

This confuses `rclone serve webdav` into giving this error

    http: superfluous response.WriteHeader

This fixes the VFS layer to remove any directory entries where the
file creation did not succeed.

Fixes #5702
2023-04-18 17:33:04 +01:00
Damo
fda06fc17d docs: mount: add guidance for macFUSE installed via macports 2023-04-18 15:28:20 +01:00
Rintze Zelle
2faa4758e4 docs: azureblob: typo fix in "azureblob-account" command 2023-04-18 12:48:55 +01:00
Nick Craig-Wood
9a9ef040e3 vfs: fix reload: failed to add virtual dir entry: file does not exist
This error happened on a restart of the VFS with files to upload into
a new directory on a bucket based backend. Rclone was assuming that
directories created before the restart would still exist, but this is
a bad assumption for bucket based backends which don't really have
directories.

This change creates the pretend directory and thus the directory cache
if the parent directory does not exist when adding a virtual on a
backend which can't have empty directories.

See: https://forum.rclone.org/t/that-pesky-failed-to-reload-error-message/34527
2023-04-13 18:00:26 +01:00
Nick Craig-Wood
ca403dc90e vfs: add MkdirAll function to make a directory and all beneath 2023-04-13 18:00:22 +01:00
Nick Craig-Wood
451f4c2a8f onedrive: fix quickxorhash on 32 bit architectures
Before this fix quickxorhash would sometimes crash with an error like
this:

    panic: runtime error: slice bounds out of range [-1248:]

This was caused by an incorrect cast of a 64 bit number to a 32 bit
one on 32 bit platforms.

See: https://forum.rclone.org/t/panic-runtime-error-slice-bounds-out-of-range/37548
2023-04-13 15:14:46 +01:00
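A generic illustration of the 32-bit truncation hazard, not the quickxorhash code itself: casting a large int64 offset to int wraps negative on 386/arm builds and panics, while keeping the arithmetic in int64 with a bounds check does not.

    package sketch

    // sliceAtBroken shows the crash shape: int is 32 bits on 32-bit
    // platforms, so int(pos) can wrap negative and panic with
    // "slice bounds out of range".
    func sliceAtBroken(buf []byte, pos int64) []byte {
        return buf[int(pos):]
    }

    // sliceAtFixed keeps the arithmetic in int64 and bounds checks first.
    func sliceAtFixed(buf []byte, pos int64) []byte {
        if pos < 0 || pos > int64(len(buf)) {
            return nil
        }
        return buf[pos:]
    }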
Nick Craig-Wood
5f6b105c3e Add Shyim to contributors 2023-04-13 15:14:46 +01:00
Nick Craig-Wood
d98837b7e6 Add Roel Arents to contributors 2023-04-13 15:14:46 +01:00
Shyim
99dd748fec serve restic: trigger systemd notify
Allow use of Type=notify together with serving the restic API
2023-04-10 15:22:54 +01:00
albertony
bdfe213c47 version: fix reported os/kernel version for windows 2023-04-10 12:02:26 +02:00
albertony
52fbb10b47 config: add more unit tests of save 2023-04-08 21:48:21 +02:00
albertony
6cb584f455 config: do not overwrite config file symbolic link - fixes #6754 2023-04-08 21:48:21 +02:00
albertony
ec8bbb8d30 config: do not remove/overwrite other files during config file save - fixes #3759 2023-04-08 21:48:21 +02:00
wiserain
fcdffab480 Add @wiserain as the pikpak backend maintainer 2023-04-06 17:45:54 +09:00
dependabot[bot]
aeb568c494 build(deps): bump github.com/aws/aws-sdk-go from 1.44.228 to 1.44.236
Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.228 to 1.44.236.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.228...v1.44.236)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-05 15:12:35 +01:00
dependabot[bot]
b07f575d07 build(deps): bump github.com/oracle/oci-go-sdk/v65
Bumps [github.com/oracle/oci-go-sdk/v65](https://github.com/oracle/oci-go-sdk) from 65.33.0 to 65.34.0.
- [Release notes](https://github.com/oracle/oci-go-sdk/releases)
- [Changelog](https://github.com/oracle/oci-go-sdk/blob/master/CHANGELOG.md)
- [Commits](https://github.com/oracle/oci-go-sdk/compare/v65.33.0...v65.34.0)

---
updated-dependencies:
- dependency-name: github.com/oracle/oci-go-sdk/v65
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-05 15:12:00 +01:00
dependabot[bot]
ebae647dfa build(deps): bump google.golang.org/api from 0.114.0 to 0.115.0
Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.114.0 to 0.115.0.
- [Release notes](https://github.com/googleapis/google-api-go-client/releases)
- [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.114.0...v0.115.0)

---
updated-dependencies:
- dependency-name: google.golang.org/api
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-05 15:11:18 +01:00
dependabot[bot]
6fd5b469bc build(deps): bump github.com/spf13/cobra from 1.6.1 to 1.7.0
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.6.1 to 1.7.0.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Commits](https://github.com/spf13/cobra/compare/v1.6.1...v1.7.0)

---
updated-dependencies:
- dependency-name: github.com/spf13/cobra
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-05 15:10:31 +01:00
dependabot[bot]
78e822dd79 build(deps): bump github.com/shirou/gopsutil/v3 from 3.23.2 to 3.23.3
Bumps [github.com/shirou/gopsutil/v3](https://github.com/shirou/gopsutil) from 3.23.2 to 3.23.3.
- [Release notes](https://github.com/shirou/gopsutil/releases)
- [Commits](https://github.com/shirou/gopsutil/compare/v3.23.2...v3.23.3)

---
updated-dependencies:
- dependency-name: github.com/shirou/gopsutil/v3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-05 15:09:55 +01:00
Roel Arents
a79db20bcd azureblob: send nil tier if empty string 2023-04-05 15:08:32 +01:00
Nick Craig-Wood
d67ef19f6e bisync: fix maxDelete parameter being ignored via the rc
See: https://forum.rclone.org/t/bisync-maxdelete-api/37215
2023-04-05 14:51:46 +01:00
Nick Craig-Wood
037a6bd1b0 crypt: recommend Dropbox for base32768 encoding
See: https://forum.rclone.org/t/base32768-filename-encoding-with-crypt-dropbox-remote/37375
2023-04-05 14:51:21 +01:00
Nick Craig-Wood
09b884aade Add wiserain to contributors 2023-04-05 14:51:21 +01:00
wiserain
243bcc9d07 pikpak: new backend
Fixes #6429
2023-04-04 16:33:48 +01:00
Nick Craig-Wood
64cf9ac911 local: fix /path/to/file.rclonelink when -l/--links is in use
Before this change using /path/to/file.rclonelink would not find the
file when using -l/--links.

This fixes the problem by doing another stat call if the file wasn't
found without the suffix if -l/--links is in use.

It will also give an error if you refer to a symlink without its
suffix which will not work because the limit to a single file
filtering will be using the file name without the .rclonelink suffix.

    need ".rclonelink" suffix to refer to symlink when using -l/--links

Before this change it would use the symlink as a directory which then
would fail when listed.

See: #6855
2023-04-04 10:22:00 +01:00
Nick Craig-Wood
15a3ec8fa1 local: fix filtering of symlinks with -l/--links flag
Before this fix, with the -l flag, the `.rclonelink` suffix wasn't
being added to the file names before filtering by name.

See #6855
2023-04-04 10:22:00 +01:00
Nick Craig-Wood
2b8af4d23f sync,copy,move: make sure we output a debug log on start of transfer
Before this change we weren't outputting a debug log on the start of a
transfer for files which existed on the source but not in the
destination.

This was different to the single file copy routine.
2023-04-04 09:41:36 +01:00
Nick Craig-Wood
5755e31ef0 Add Joel to contributors 2023-04-04 09:41:36 +01:00
Joel
f4c787ab74 sftp: add --sftp-host-key-algorithms to allow specifying SSH host key algorithms 2023-03-30 18:00:54 +01:00
Nick Craig-Wood
4d7b6e14b8 mount: clarify rclone mount error when installed via homebrew
See: https://forum.rclone.org/t/suggestion-for-error-message/37145
2023-03-29 13:59:27 +01:00
Nick Craig-Wood
9ea7d143dd Add Drew Parsons to contributors 2023-03-29 13:59:27 +01:00
Drew Parsons
927e721a25 docs: faq: clarify name resolver control
On Linux systems rclone builds with cgo but uses the internal Go
resolver for DNS by default.

This updates the FAQ to suggest use of GODEBUG=netdns=cgo if there are
name resolution problems on Linux/BSD (rebuilding from source with
CGO_ENABLED if necessary), or trying GODEBUG=netdns=go on Windows/macOS.

See: #683
2023-03-28 15:24:37 +01:00
Nick Craig-Wood
bd46f01eb4 cmount: add --mount-case-insensitive to force the mount to be case insensitive 2023-03-27 16:17:49 +01:00
Nick Craig-Wood
5f4d7154c0 fs: fix tristate conversion to JSON 2023-03-27 16:17:49 +01:00
Nick Craig-Wood
bad8a01850 fs: allow boolean features to be enabled with --disable !Feature 2023-03-27 16:17:49 +01:00
Nick Craig-Wood
d808c3848a Add ed to contributors 2023-03-27 16:17:49 +01:00
ed
3f0bec2ee9 webdav: make pacer minSleep configurable
This adds the config argument --webdav-pacer-min-sleep which specifies
the http-request rate limit. Lowering this from the default 10ms can
greatly improve performance when synchronizing small files.

See: https://forum.rclone.org/t/webdav-with-persistent-connections/37024/10
2023-03-27 15:30:02 +02:00
Nick Craig-Wood
8fb9eb2fee sync: make --suffix-keep-extension preserve 2 part extensions like .tar.gz
If a file has two (or more) extensions and the second (or subsequent)
extension is recognised as a valid mime type, then the suffix will go
before that extension. So `file.tar.gz` would be backed up to
`file-2019-01-01.tar.gz` whereas `file.badextension.gz` would be
backed up to `file.badextension-2019-01-01.gz`

Fixes #6892
2023-03-27 14:24:21 +01:00
Nick Craig-Wood
01fa15a7d9 Add Aditya Basu to contributors 2023-03-27 14:24:21 +01:00
Nick Craig-Wood
6aaa5d7a75 Add jumbi77 to contributors 2023-03-27 14:24:21 +01:00
Nick Craig-Wood
b4d3411637 Add Juang, Yi-Lin to contributors 2023-03-27 14:24:21 +01:00
Nick Craig-Wood
01ddc8ca6c Add NickIAm to contributors 2023-03-27 14:24:21 +01:00
Nick Craig-Wood
16c1e7149e Add yuudi to contributors 2023-03-27 14:24:21 +01:00
albertony
0374ea2c79 Use jwt-go (golang-jwt) instead of deprecated jws (x/oauth2/jws)
golang.org/x/oauth2/jws is deprecated: this package is not intended for public use and
might be removed in the future. It exists for internal use only. Please switch to another
JWS package or copy this package into your own source tree.

github.com/golang-jwt/jwt/v4 seems to be a good alternative, and was already
an implicit dependency.
2023-03-26 19:20:50 +02:00
Nick Craig-Wood
2e2451f8ec lib/rest: fix problems re-using HTTP connections
Before this fix, it was noticed that the rclone webdav client did not
re-use HTTP connections when it should have been.

This turned out to be because rclone was not draining the HTTP bodies
when it was not expecting a response.

From the Go docs:

> If the returned error is nil, the Response will contain a non-nil
> Body which the user is expected to close. If the Body is not both
> read to EOF and closed, the Client's underlying RoundTripper
> (typically Transport) may not be able to re-use a persistent TCP
> connection to the server for a subsequent "keep-alive" request.

This fixes the problem by draining up to 10MB of data from an HTTP
response if the NoResponse flag is set, or at the end of a JSON or XML
response (which could have some whitespace on the end).

See: https://forum.rclone.org/t/webdav-with-persistent-connections/37024/
2023-03-26 17:19:48 +01:00
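The keep-alive rule quoted from the Go docs boils down to something like this sketch; the helper name is illustrative and the 10MB cap simply mirrors the figure mentioned above.

    package sketch

    import (
        "io"
        "net/http"
    )

    const drainLimit = 10 * 1024 * 1024 // don't read unbounded amounts

    // drainAndClose reads the rest of the body (up to the cap) and closes
    // it, so the Transport can return the connection to its keep-alive pool.
    func drainAndClose(resp *http.Response) error {
        _, copyErr := io.Copy(io.Discard, io.LimitReader(resp.Body, drainLimit))
        closeErr := resp.Body.Close()
        if copyErr != nil {
            return copyErr
        }
        return closeErr
    }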
albertony
bd1e3448b3 build: add exclude for misspell linter 2023-03-26 17:05:24 +02:00
albertony
20909fa294 build: enable misspell linter 2023-03-26 17:05:24 +02:00
albertony
c502e00c87 fs: fix infinite recursive call in pacer ModifyCalculator (fixes issue reported by the staticcheck linter) 2023-03-26 14:28:15 +02:00
albertony
9172c9b3dd crypt: reduce allocations
This changes crypt's use of sync.Pool: instead of storing slices
it now stores pointers to fixed-size arrays.

This issue was reported by staticcheck:

SA6002 - Storing non-pointer values in sync.Pool allocates memory

A sync.Pool is used to avoid unnecessary allocations and reduce
the amount of work the garbage collector has to do.

When passing a value that is not a pointer to a function that accepts
an interface, the value needs to be placed on the heap, which means
an additional allocation. Slices are a common thing to put in sync.Pools,
and they're structs with 3 fields (length, capacity, and a pointer to
an array). In order to avoid the extra allocation, one should store
a pointer to the slice instead.

See: https://staticcheck.io/docs/checks#SA6002
2023-03-26 14:28:15 +02:00
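A minimal example of the SA6002-friendly pattern described above, not crypt's actual buffer handling: pool a pointer to a fixed-size array and hand out slice views of it.

    package sketch

    import "sync"

    const blockSize = 64 * 1024 // size chosen for illustration only

    var blockPool = sync.Pool{
        // Storing *[N]byte means Get/Put move a single pointer and never
        // allocate to box a slice header in an interface (SA6002).
        New: func() any { return new([blockSize]byte) },
    }

    func withBlock(fn func(b []byte)) {
        buf := blockPool.Get().(*[blockSize]byte)
        defer blockPool.Put(buf)
        fn(buf[:]) // callers see an ordinary slice view of the pooled array
    }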
albertony
78deab05f9 netstorage: ignore false positive from the staticcheck linter regarding header name not being canonical 2023-03-26 14:28:15 +02:00
albertony
6c9d377bbb vfs: ignore false positive from the unused linter 2023-03-26 14:28:15 +02:00
albertony
62ddc9b7f9 vfscache: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
448ae49fa4 webgui: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
5f3c276d0a zoho: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
9cea493f58 union: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
400d1a4468 swift: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
851ce0f4fe seafile: remove unused code for legacy API v2 (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
cc885bd39a hidrive: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
a1a8c21c70 dropbox: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
6ef4bd8c45 cache: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
fb316123ec azureblob: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
270af61665 smb: code cleanup to avoid overwriting ctx before first use (fixes issue reported by the staticcheck linter) 2023-03-26 14:28:15 +02:00
albertony
155f4f2e21 mount: replace deprecated bazil/fuse specific constants with syscall constants 2023-03-26 14:28:15 +02:00
albertony
eaf593884b serve/ftp: use io.SeekEnd instead of os.SEEK_END (deprecated since Go 1.7) 2023-03-26 14:28:15 +02:00
albertony
930574c6e9 oracleobjectstorage: remove empty branch (fixes issue reported by the staticcheck linter) 2023-03-26 14:28:15 +02:00
albertony
c1586a9866 onedrive: report any list errors during cleanup 2023-03-26 14:28:15 +02:00
albertony
432eb74814 lib: avoid unnecessary use of fmt.Sprintf for string constant 2023-03-26 14:28:15 +02:00
albertony
92fb644fb6 test: use decompressed.String() instead of string(decompressed.Bytes()) 2023-03-26 14:28:15 +02:00
albertony
bb92af693a test: do not test deprecated and unused Dial and DialTLS functions on http Transport type 2023-03-26 14:28:15 +02:00
albertony
eb5fd07131 mount: error strings should not be capitalized 2023-03-26 14:28:15 +02:00
albertony
b2ce7c9aa6 hidrive: error strings should not be capitalized 2023-03-26 14:28:15 +02:00
albertony
d6b46e41dd build: replace deprecated linters deadcode, structcheck and varcheck with unused
The three linters deadcode, structcheck and varcheck that we have been using are, as of
golangci-lint version 1.49.0 (24 Aug 2022), marked as deprecated and replaced by unused.

The linters staticcheck, gosimple, stylecheck and unused should together correspond to
the checks performed by the stand-alone staticcheck tool, which is by default used for
linting in Visual Studio Code with the Go extension. We previously enabled the first
three, but skipped unused due to many reported issues.

See #6387 for more information.
2023-03-26 14:28:15 +02:00
albertony
254c6ef1dd build: add lint ignore comment required for golangci-staticcheck in addition to stand-alone staticcheck 2023-03-26 14:28:15 +02:00
albertony
547f943851 build: exclude known issues from the staticcheck linting in ci 2023-03-26 14:28:15 +02:00
albertony
8611c9f6f7 build: add staticcheck, gosimple and stylecheck linting to the build pipeline - fixes #6273
These combined should correspond to the checks performed by the stand-alone
staticcheck tool, which is by default used for linting in Visual Studio Code
with the Go extension. One exception is the unused checks, which the staticcheck
tool performs, but which we chose not to enable here in rclone due to many reported
occurrences.

See #6387 for more information.
2023-03-26 14:28:15 +02:00
Dimitri Papadopoulos
f6576237a4 fs: fix typos found by codespell 2023-03-25 12:51:04 +01:00
Dimitri Papadopoulos
207b64865e fstest: fix typo found by codespell 2023-03-25 09:34:10 +01:00
Dimitri Papadopoulos
9ee1b21ec2 vfs: fix typos found by codespell 2023-03-25 09:33:34 +01:00
Dimitri Papadopoulos
55a12bd639 backend: fix repeated words typos 2023-03-25 09:31:36 +01:00
dependabot[bot]
3b4a57dab9 build(deps): bump github.com/aws/aws-sdk-go from 1.44.227 to 1.44.228
Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.227 to 1.44.228.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.227...v1.44.228)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-24 20:49:50 +00:00
Dimitri Papadopoulos
afe158f878 docs: fix typos found by codespell 2023-03-24 20:49:00 +00:00
Dimitri Papadopoulos
722a3f32cc cmdtest: fix typos found by codespell 2023-03-24 20:44:25 +00:00
Dimitri Papadopoulos
9183618082 backend: fix typos found by codespell 2023-03-24 20:42:45 +00:00
Dimitri Papadopoulos
18ebca3979 lib: fix typos found by codespell 2023-03-24 20:40:52 +00:00
Nick Craig-Wood
e84d2c9e5f docs: add info about # of parallel checks for rclone check/cryptcheck
The original commit 7dbf1ab66f put the changes in the auto
generated docs - this fixes that.
2023-03-24 12:43:45 +00:00
Aditya Basu
e98b61ceeb docs: update install with docker interactive use
* Install with docker: interactive use
* remove extra mount from command
* update listremotes
2023-03-24 11:42:58 +00:00
albertony
19f9fca2f6 docs: document how the configuration file is written, and that an .old file will be deleted 2023-03-24 11:40:34 +00:00
jumbi77
7dbf1ab66f docs: add info about # of parallel checks for rclone check/cryptcheck 2023-03-24 11:35:58 +00:00
Dimitri Papadopoulos
bfe272bf67 backend: fix typos found by codespell 2023-03-24 11:34:14 +00:00
Dimitri Papadopoulos
cce8936802 cmd: fix typos found by codespell 2023-03-24 11:32:59 +00:00
Juang, Yi-Lin
043bf3567d drive: update drive service account guide 2023-03-24 11:31:46 +00:00
NickIAm
1b2f2c0d69 docs: add section about --vfs-cache-max-age
This change adds a section to clarify how exactly the --vfs-cache-max-age flag affects caching
2023-03-24 11:28:34 +00:00
yuudi
4b376514a6 doc: Clarify the srcFs and dstRs when using local filesystem
Co-authored-by: yuudi <yuudi@users.noreply.github.com>
2023-03-24 11:25:39 +00:00
Peter Brunner
c27e6a89b0 drive: add env_auth to drive provider
This change provides the ability to pass `env_auth` as a parameter to
the drive provider. This enables the provider to pull IAM
credentials from the environment or instance metadata. Previously if no
auth method was given it would default to requesting oauth.
2023-03-24 11:11:21 +00:00
albertony
76c6e3b15c build: set Rclone as file description since it is shown as process name in task manager 2023-03-23 18:55:57 +01:00
Nick Craig-Wood
48ec00cc1a rc: fix missing --rc flags
In this commit we accidentally removed the global --rc flags.

0df7466d2b cmd/rcd: Fix command docs to include command specific prefix (#6675)

This re-instates them.
2023-03-23 12:05:31 +00:00
dependabot[bot]
866600a73b build(deps): bump github.com/aws/aws-sdk-go from 1.44.226 to 1.44.227
Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.226 to 1.44.227.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.226...v1.44.227)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-23 11:27:14 +00:00
Nick Craig-Wood
d8f4cd4d5f drive: fix change notify picking up files outside the root
Before this change, change notify would pick up files which were
shared with us as well as file within the drive.

When using an encrypted mount this caused errors like:

    ChangeNotify was unable to decrypt "Plain file name": illegal base32 data at input byte 5

The fix tells drive to restrict changes to the drive in use.

Fixes #6771
2023-03-22 16:24:07 +00:00
Nick Craig-Wood
d0810b602a crypt: add --crypt-pass-bad-blocks to allow corrupted file output 2023-03-22 16:23:37 +00:00
Nick Craig-Wood
d5afcf9e34 crypt: try not to return "unexpected EOF" error
Before this change the code wasn't properly taking into account the error
io.ErrUnexpectedEOF that io.ReadFull can return. Sometimes
that error was being returned instead of a more specific and useful
error.

To fix this, io.ReadFull was replaced with the simpler
readers.ReadFill which is much easier to use correctly.
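
A rough sketch of what a ReadFill-style helper does (not the actual lib/readers implementation): it reads as much as it can into the buffer and reports a short read as plain io.EOF rather than io.ErrUnexpectedEOF, so callers handle short reads uniformly.

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // readFill reads from r into buf until buf is full or r is exhausted.
    // Unlike io.ReadFull it never returns io.ErrUnexpectedEOF: a short
    // read simply returns the bytes read plus io.EOF.
    func readFill(r io.Reader, buf []byte) (n int, err error) {
        for n < len(buf) && err == nil {
            var nn int
            nn, err = r.Read(buf[n:])
            n += nn
        }
        return n, err
    }

    func main() {
        buf := make([]byte, 8)
        n, err := readFill(strings.NewReader("hi"), buf)
        fmt.Printf("read %d bytes, err=%v\n", n, err) // read 2 bytes, err=EOF
    }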
2023-03-22 16:23:37 +00:00
Nick Craig-Wood
07c4d95f38 crypt: fix tests assert.Error which should have been assert.EqualError 2023-03-22 16:23:37 +00:00
Nick Craig-Wood
fd83071b6b rc: fix operations/stat with trailing /
Before this change using operations/stat with a remote pointing to a
dir with a trailing / would return a null output rather than the
correct info.

This was because the directory was not found with a trailing slash in
the directory listing.

Fixes #6817
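
The underlying idea is simply to normalise the trailing slash before the lookup; a trivial illustrative sketch, not the actual rc code:

    package main

    import (
        "fmt"
        "strings"
    )

    // normalizeDir trims a trailing "/" so that a remote path given as
    // "dir/" is looked up as "dir", matching the name returned by the
    // directory listing.
    func normalizeDir(p string) string {
        if p == "/" || p == "" {
            return p
        }
        return strings.TrimSuffix(p, "/")
    }

    func main() {
        fmt.Println(normalizeDir("backups/2023/")) // backups/2023
    }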
2023-03-22 16:22:45 +00:00
Nick Craig-Wood
e042d9089f fs: Fix interaction between --progress and --interactive
Before this change if both --progress and --interactive were set then
the screen display could become muddled.

This change makes --progress and --interactive use the same lock so
while rclone is asking for interactive questions, the progress will be
paused.

Fixes #6755
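
A toy sketch of the shared-lock idea (not rclone's actual code): the progress renderer and the interactive prompt take the same mutex, so a question holds off redraws until it is answered.

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "sync"
        "time"
    )

    // termMu serialises everything that writes to the terminal: the
    // progress renderer and interactive questions take the same lock, so
    // a question is never interleaved with a progress redraw.
    var termMu sync.Mutex

    func printProgress(done chan struct{}) {
        for {
            select {
            case <-done:
                return
            case <-time.After(250 * time.Millisecond):
                termMu.Lock()
                fmt.Print("\rTransferred: 42%")
                termMu.Unlock()
            }
        }
    }

    func askQuestion(prompt string) string {
        termMu.Lock()
        defer termMu.Unlock() // progress stays paused until we have an answer
        fmt.Print("\n" + prompt + " ")
        answer, _ := bufio.NewReader(os.Stdin).ReadString('\n')
        return answer
    }

    func main() {
        done := make(chan struct{})
        go printProgress(done)
        _ = askQuestion("delete file? (y/n)")
        close(done)
    }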
2023-03-22 16:18:41 +00:00
Nick Craig-Wood
cdfa0beafb lib/atexit: ensure OnError only calls cancel function once
Before this change the cancelFunc could be called twice, once while
handling the interrupt (CTRL-C) and once while unwinding the stack if
the function happened to finish.

This change ensure the cancelFunc is only called once by wrapping it
in a sync.Once
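
The pattern looks roughly like this (illustrative names, not the actual lib/atexit code): the cancel function is wrapped so that calling it from both the signal handler and the normal unwind path only cancels once.

    package main

    import (
        "context"
        "fmt"
        "sync"
    )

    // onceCancel wraps cancel so that calling the returned function many
    // times (e.g. from a signal handler and from a deferred cleanup)
    // only runs the real cancellation once.
    func onceCancel(cancel context.CancelFunc) context.CancelFunc {
        var once sync.Once
        return func() {
            once.Do(func() {
                fmt.Println("cancelling")
                cancel()
            })
        }
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        safeCancel := onceCancel(cancel)

        safeCancel() // e.g. called while handling CTRL-C
        safeCancel() // e.g. called again when unwinding the stack

        <-ctx.Done()
        fmt.Println("context done:", ctx.Err())
    }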
2023-03-22 12:50:58 +00:00
Nick Craig-Wood
ddb3b17e96 s3: fix hang on aborting multipart upload with iDrive e2
Apparently the abort multipart upload call doesn't return while
multipart uploads are in progress on iDrive e2.

This means that if we CTRL-C a multipart upload, rclone hangs until
all the parts being uploaded have completed. However since rclone is uploading
multiple parts at once this doesn't happen until after the entire file
is uploaded.

This was fixed by cancelling the upload context which causes all the
uploads to stop instantly.
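
A simplified sketch of the idea, with stand-in functions rather than the real S3 uploader: every part upload watches a shared context, so a single cancel() makes all in-flight parts return immediately instead of running to completion.

    package main

    import (
        "context"
        "fmt"
        "sync"
        "time"
    )

    // uploadPart stands in for a single multipart part upload; it aborts
    // as soon as the shared context is cancelled.
    func uploadPart(ctx context.Context, part int) error {
        select {
        case <-time.After(time.Second): // pretend network transfer
            return nil
        case <-ctx.Done():
            return ctx.Err()
        }
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())

        var wg sync.WaitGroup
        for part := 1; part <= 4; part++ {
            wg.Add(1)
            go func(part int) {
                defer wg.Done()
                if err := uploadPart(ctx, part); err != nil {
                    fmt.Printf("part %d aborted: %v\n", part, err)
                }
            }(part)
        }

        // Simulate CTRL-C shortly after the uploads start.
        time.Sleep(100 * time.Millisecond)
        cancel()
        wg.Wait()
    }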
2023-03-22 12:50:58 +00:00
Nick Craig-Wood
32f71c97ea Add Zach Kipp to contributors 2023-03-22 12:50:58 +00:00
dependabot[bot]
53853116fb build(deps): bump github.com/aws/aws-sdk-go from 1.44.223 to 1.44.226
Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.223 to 1.44.226.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.223...v1.44.226)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-22 11:51:03 +00:00
dependabot[bot]
a887856998 build(deps): bump github.com/oracle/oci-go-sdk/v65
Bumps [github.com/oracle/oci-go-sdk/v65](https://github.com/oracle/oci-go-sdk) from 65.32.1 to 65.33.0.
- [Release notes](https://github.com/oracle/oci-go-sdk/releases)
- [Changelog](https://github.com/oracle/oci-go-sdk/blob/master/CHANGELOG.md)
- [Commits](https://github.com/oracle/oci-go-sdk/compare/v65.32.1...v65.33.0)

---
updated-dependencies:
- dependency-name: github.com/oracle/oci-go-sdk/v65
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-22 11:50:24 +00:00
Zach Kipp
0df7466d2b cmd/rcd: Fix command docs to include command specific prefix (#6675)
This change addresses two issues with commands that re-used
flags from common packages:

1) cobra.Command definitions did not include the command specific
   prefix in doc strings.
2) Command specific flag prefixes were added after generating
   command doc strings.
2023-03-22 11:47:35 +00:00
eNV25
23579e3b99 cmd/ncdu: refactor redraw handling 2023-03-21 16:41:22 +00:00
Nick Craig-Wood
3affba6fa6 build: remove duplicate linux/arm64 build 2023-03-21 16:25:46 +00:00
Nick Craig-Wood
542677d807 s3: fix --s3-versions on individual objects
Before this fix attempting to access an s3 versioned object by name in
a subdirectory of root would not find the object.

This fixes the problem and introduces an integration test.

See: https://forum.rclone.org/t/s3-versions-cant-retrieve-old-version/36900
2023-03-21 12:44:45 +00:00
Nick Craig-Wood
d481aa8613 Revert "s3: fix InvalidRequest copying to a locked bucket from a source with no MD5SUM"
This reverts commit e5a1bcb1ce.

This causes a lot of integration test failures so may need to be optional.
2023-03-21 11:43:43 +00:00
Nick Craig-Wood
15e633fa8b build: disable provenance in docker build
To attempt to fix this error:

buildx failed with: ERROR: failed to solve: missing provenance for owlcc15myb2dpmxrz6dl5bzqc
2023-03-20 18:09:54 +00:00
dependabot[bot]
732c24c624 build(deps): bump docker/build-push-action from 3 to 4
Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 3 to 4.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](https://github.com/docker/build-push-action/compare/v3...v4)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-20 16:21:22 +00:00
Nick Craig-Wood
75dfdbf211 ci: revert revive settings back to fix lint
The upstream revive repo changed the default settings for this linter.
We use this through golangci-lint.

This change meant lots of errors appearing all at once. We should
probably fix these in due course, but for the time being this disables
those settings.

See: https://github.com/mgechev/revive/pull/799
2023-03-20 15:26:21 +00:00
asdffdsazqqq
5f07113a4b docs: install: how to uninstall rclone via winget 2023-03-20 14:51:42 +00:00
Richard Tweed
6a380bcc67 build: fix dockerfile reference in beta image pipeline 2023-03-20 11:54:31 +00:00
dependabot[bot]
97276ce765 build(deps): bump google.golang.org/api from 0.112.0 to 0.114.0
Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.112.0 to 0.114.0.
- [Release notes](https://github.com/googleapis/google-api-go-client/releases)
- [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.112.0...v0.114.0)

---
updated-dependencies:
- dependency-name: google.golang.org/api
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-17 16:25:06 +00:00
dependabot[bot]
a23a7a807f build(deps): bump github.com/klauspost/compress from 1.16.0 to 1.16.3
Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.16.0 to 1.16.3.
- [Release notes](https://github.com/klauspost/compress/releases)
- [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml)
- [Commits](https://github.com/klauspost/compress/compare/v1.16.0...v1.16.3)

---
updated-dependencies:
- dependency-name: github.com/klauspost/compress
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-17 16:24:20 +00:00
dependabot[bot]
c6a4caaf7e build(deps): bump goftp.io/server
Bumps goftp.io/server from 0.4.2-0.20210615155358-d07a820aac35 to 1.0.0-rc1.

---
updated-dependencies:
- dependency-name: goftp.io/server
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-17 16:23:37 +00:00
dependabot[bot]
5574733dcb build(deps): bump github.com/oracle/oci-go-sdk/v65
Bumps [github.com/oracle/oci-go-sdk/v65](https://github.com/oracle/oci-go-sdk) from 65.32.0 to 65.32.1.
- [Release notes](https://github.com/oracle/oci-go-sdk/releases)
- [Changelog](https://github.com/oracle/oci-go-sdk/blob/master/CHANGELOG.md)
- [Commits](https://github.com/oracle/oci-go-sdk/compare/v65.32.0...v65.32.1)

---
updated-dependencies:
- dependency-name: github.com/oracle/oci-go-sdk/v65
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-17 16:22:54 +00:00
dependabot[bot]
49c21d0b6e build(deps): bump github.com/aws/aws-sdk-go from 1.44.218 to 1.44.223
Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.218 to 1.44.223.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.218...v1.44.223)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-17 16:22:08 +00:00
eNV25
0ea2ce3674 cmd/ncdu: fix screen corruption when logging
Before this change if logs were not redirected, logging would
corrupt the terminal screen.

This commit stores the logs (max ~100 lines) in an array and
prints them when the program exits.
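
A minimal sketch of the buffer-and-flush-at-exit idea (illustrative, not the actual ncdu code): log writes go into a capped in-memory buffer while the full-screen UI is running, and are printed once the UI has shut down.

    package main

    import "fmt"

    const maxBufferedLogs = 100

    // logBuffer collects log lines instead of writing them to the
    // terminal while the full-screen UI is running.
    type logBuffer struct {
        lines []string
    }

    func (b *logBuffer) Write(p []byte) (int, error) {
        if len(b.lines) < maxBufferedLogs {
            b.lines = append(b.lines, string(p))
        }
        return len(p), nil
    }

    // Flush prints the captured lines once the UI has shut down, so they
    // cannot corrupt the screen while it is being drawn.
    func (b *logBuffer) Flush() {
        for _, line := range b.lines {
            fmt.Print(line)
        }
    }

    func main() {
        var buf logBuffer
        fmt.Fprintln(&buf, "log line while UI is active")
        // ... run the full-screen UI here ...
        buf.Flush()
    }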
2023-03-17 14:52:34 +00:00
dependabot[bot]
3ddf824251 build(deps): bump actions/setup-go from 3 to 4
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3 to 4.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/v3...v4)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-17 14:41:49 +00:00
Nick Craig-Wood
68fdff3c27 build: ensure users with no secrets (dependabot) don't run android upload step 2023-03-17 14:04:46 +00:00
Nick Craig-Wood
c003485ae3 build: ensure users with no secrets (dependabot) don't run deploy step 2023-03-17 13:49:11 +00:00
Nick Craig-Wood
99d5080191 Add Richard Tweed to contributors 2023-03-17 13:49:11 +00:00
alankrit
2ad217eedd librclone: Added example on using librclone with golang. 2023-03-17 12:00:27 +00:00
albertony
a3eb7f1142 jottacloud: fix vfs writeback stuck in a failed upload loop with file versioning disabled
Avoid returning an error when the no_versions option is set and the remove fails

Fixes #6857
2023-03-17 11:54:43 +00:00
Richard Tweed
6d620b6d88 build: update docker beta build to latest actions and to push to ghcr
* Add ghcr option for docker images
* Update to use the upstream build actions
* Add ability to push beta images manually.
2023-03-17 11:54:01 +00:00
Arnav Singh
9f8357ada7 sftp: fix using key_use_agent and key_file together needing private key file
When using ssh-agent to hold multiple keys, it is common practice to configure
openssh to use a specific key by setting the corresponding public key as
the `IdentityFile`. This change makes a similar behavior possible in rclone
by having it parse the `key_file` config as the public key when
`key_use_agent` is `true`.

rclone already attempted this behavior before this change, but it assumed that
`key_file` is the private key and that the public key is specified in
`${key_file}.pub`. So for parity with the openssh behavior, this change makes
rclone first attempt to read the public key from `${key_file}.pub` as before
(for the sake of backward compatibility), then fall back to reading it from
`key_file`.

Fixes #6791
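
A hedged sketch of that lookup order using the golang.org/x/crypto/ssh package; the loadPublicKey helper is illustrative, not rclone's sftp backend code.

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/crypto/ssh"
    )

    // loadPublicKey reads the public key used to pick the matching key in
    // ssh-agent. It first tries keyFile+".pub" (the old behaviour) and
    // then falls back to parsing keyFile itself as a public key.
    func loadPublicKey(keyFile string) (ssh.PublicKey, error) {
        for _, path := range []string{keyFile + ".pub", keyFile} {
            data, err := os.ReadFile(path)
            if err != nil {
                continue
            }
            pub, _, _, _, err := ssh.ParseAuthorizedKey(data)
            if err == nil {
                return pub, nil
            }
        }
        return nil, fmt.Errorf("no usable public key found for %q", keyFile)
    }

    func main() {
        pub, err := loadPublicKey(os.ExpandEnv("$HOME/.ssh/id_ed25519"))
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println("public key type:", pub.Type())
    }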
2023-03-17 11:44:19 +00:00
Nick Craig-Wood
e5a1bcb1ce s3: fix InvalidRequest copying to a locked bucket from a source with no MD5SUM
Before this change, we would upload files as single part uploads even
if the source MD5SUM was not available.

AWS won't let you upload a file to a locked bucket without some sort
of hash protection of the upload, which we don't have with no MD5SUM.

So we switch to multipart upload when the source does not have an
MD5SUM.

This means that if --s3-disable-checksum is set or we are copying from
a source with no MD5SUMs we will copy with multipart uploads.

This patch changes all uploads, not just those to locked buckets
because having no MD5SUM protection on uploads is undesirable.

Fixes #6846
2023-03-17 11:34:20 +00:00
Nick Craig-Wood
46484022b0 fs: add size to JSON logs when moving or copying an object #6849 2023-03-17 11:22:57 +00:00
Nick Craig-Wood
ab746ef891 Add Thibault Coupin to contributors 2023-03-17 11:22:57 +00:00
Paul
6241c1ae43 Add devnoname120 to contributors 2023-03-17 11:09:08 +00:00
Paul
0f8d3fe6a3 webdav: add support for chunked uploads — fix #3666
Co-authored-by: Thibault Coupin <thibault.coupin@gmail.com>
Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2023-03-17 11:09:08 +00:00
Paul
07afb9e700 webdav: add chunking helper file 2023-03-17 11:09:08 +00:00
Thibault Coupin
3165093feb fstests: add option to skip chunked upload 2023-03-17 11:09:08 +00:00
Paul
4af0c1d902 rest: add optional GetBody function for HTTP call 2023-03-17 11:09:08 +00:00
Nick Craig-Wood
82f9554474 docs: note that rcat will retry chunks when multipart uploading
See: https://forum.rclone.org/t/the-rclone-rcat-reliability-for-the-uploading-files-to-s3/36830
2023-03-17 10:52:21 +00:00
Nick Craig-Wood
d8d53b7aa0 Add Christopher Merry to contributors 2023-03-17 10:52:21 +00:00
Nick Craig-Wood
8c9048259a Add Arnavion to contributors 2023-03-17 10:52:21 +00:00
Christopher Merry
0361acbde4 googlecloudstorage: added gcs requester pays 2023-03-16 17:13:37 +00:00
Aaron Gokaslan
f5bf0a48f3 uptobox: fix improper regex 2023-03-16 17:12:27 +00:00
albertony
cec843dd8c build: run workflow even if tag/branch name contains slash 2023-03-16 17:07:07 +00:00
Anthony Pessy
54a9488e59 s3: add GCS to provider list 2023-03-16 14:24:21 +00:00
Arnavion
29fe0177bd webdav: add "fastmail" provider for Fastmail Files
This provider:

- supports the `X-OC-Mtime` header to set the mtime

- calculates SHA1 checksum server side and returns it as a `ME:sha1hex` prop

To differentiate the new hasMESHA1 quirk, the existing hasMD5 and hasSHA1
quirks for Owncloud have been renamed to hasOCMD5 and hasOCSHA1.

Fixes #6837
2023-03-16 14:20:29 +00:00
Nick Craig-Wood
0e134364ac Changelog updates from Version v1.62.2 2023-03-16 12:00:06 +00:00
Lesmiscore
0d8350d95d ftp: fix 426 errors on downloads with vsftpd
Sometimes vsftpd returns a 426 error when closing the stream even when
all the data has been transferred successfully. This is some TLS
protocol mismatch.

Rclone has code to deal with this already, but the error returned from
Close was wrapped in a multierror so the detection didn't work.

This properly extracts `textproto.Error` from the errors returned by
`github.com/jlaffaye/ftp` in all the cases.

See: https://forum.rclone.org/t/vsftpd-vs-rclone-part-2/36774
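
The general technique is to unwrap the error chain with errors.As and inspect the *textproto.Error code. A simplified sketch (not the actual backend code; the error message text is made up):

    package main

    import (
        "errors"
        "fmt"
        "net/textproto"
    )

    // ftpStatusCode digs a *textproto.Error out of an arbitrarily wrapped
    // error and returns its status code, or 0 if there is none.
    func ftpStatusCode(err error) int {
        var protoErr *textproto.Error
        if errors.As(err, &protoErr) {
            return protoErr.Code
        }
        return 0
    }

    func main() {
        base := &textproto.Error{Code: 426, Msg: "Failure writing network stream"}
        wrapped := fmt.Errorf("error while closing download stream: %w", base)

        if ftpStatusCode(wrapped) == 426 {
            fmt.Println("ignoring spurious 426 after a complete transfer")
        }
    }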
2023-03-15 18:09:29 +00:00
asdffdsazqqq
497e373e31 docs: fix size documentation
change `Google Drive` to `Google Docs`
2023-03-15 16:21:37 +00:00
Nick Craig-Wood
ed8fea4aa5 docker volume plugin: add missing fuse3 dependency #6844 2023-03-15 15:57:53 +00:00
Nick Craig-Wood
4d7f75dd76 Changelog updates from Version v1.62.1 2023-03-15 14:53:21 +00:00
Nick Craig-Wood
53e757aea9 build: update release docs to be more careful with the tag 2023-03-15 14:53:21 +00:00
Nick Craig-Wood
f578896745 Set Github release to draft while uploading binaries 2023-03-15 14:53:21 +00:00
Nick Craig-Wood
13be03cb86 Add cycneuramus to contributors 2023-03-15 14:53:21 +00:00
cycneuramus
864e02409e docker: add missing fuse3 dependency - fixes #6844 2023-03-15 10:54:30 +00:00
Nick Craig-Wood
fccc779a15 Start v1.63.0-DEV development 2023-03-14 15:18:54 +00:00
598 changed files with 92825 additions and 62915 deletions

4
.github/FUNDING.yml vendored
View File

@@ -1,4 +0,0 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]

View File

@@ -1,10 +1,6 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "daily"
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

View File

@@ -8,9 +8,9 @@ name: build
on:
push:
branches:
- '*'
- '**'
tags:
- '*'
- '**'
pull_request:
workflow_dispatch:
inputs:
@@ -27,12 +27,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
include:
- job_name: linux
os: ubuntu-latest
go: '1.20'
go: '1.21'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -43,14 +43,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '1.20'
go: '1.21'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-11
go: '1.20'
go: '1.21'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -59,14 +59,14 @@ jobs:
- job_name: mac_arm64
os: macos-11
go: '1.20'
go: '1.21'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '1.20'
go: '1.21'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -76,35 +76,35 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.20'
go: '1.21'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.18
os: ubuntu-latest
go: '1.18'
quicktest: true
racequicktest: true
- job_name: go1.19
os: ubuntu-latest
go: '1.19'
quicktest: true
racequicktest: true
- job_name: go1.20
os: ubuntu-latest
go: '1.20'
quicktest: true
racequicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go }}
check-latest: true
@@ -130,6 +130,11 @@ jobs:
- name: Install Libraries on macOS
shell: bash
run: |
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
unset HOMEBREW_NO_INSTALL_FROM_API
brew untap --force homebrew/core
brew untap --force homebrew/cask
brew update
brew install --cask macfuse
if: matrix.os == 'macos-11'
@@ -217,7 +222,7 @@ jobs:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
# Deploy binaries if enabled in config && not a PR && not a fork
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
@@ -227,7 +232,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Code quality test
uses: golangci/golangci-lint-action@v3
@@ -237,9 +242,9 @@ jobs:
# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: '1.20'
go-version: '1.21'
check-latest: true
- name: Install govulncheck
@@ -256,15 +261,15 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: '1.20'
go-version: '1.21'
- name: Go module cache
uses: actions/cache@v3
@@ -352,4 +357,4 @@ jobs:
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'

View File

@@ -0,0 +1,77 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user whom created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
# `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
# GitHub Actions at the start of a workflow run to identify the job.
# This is used to authenticate against GitHub Container Registry.
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
# for more detailed information.
password: ${{ secrets.GITHUB_TOKEN }}
- name: Show disk usage
shell: bash
run: |
df -h .
- name: Build and publish image
uses: docker/build-push-action@v5
with:
file: Dockerfile
context: .
push: true # push the image to ghcr
tags: |
ghcr.io/rclone/rclone:beta
rclone/rclone:beta
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
cache-from: type=gha, scope=${{ github.workflow }}
cache-to: type=gha, mode=max, scope=${{ github.workflow }}
provenance: false
# Eventually cache will need to be cleared if builds more frequent than once a week
# https://github.com/docker/build-push-action/issues/252
- name: Show disk usage
shell: bash
run: |
df -h .

View File

@@ -1,26 +0,0 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

View File

@@ -11,7 +11,7 @@ jobs:
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Get actual patch version
@@ -40,7 +40,7 @@ jobs:
name: Build docker plugin job
steps:
- name: Checkout master
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and publish docker plugin

View File

@@ -5,7 +5,7 @@ on:
jobs:
publish:
runs-on: windows-latest # Action can only run on Windows
runs-on: ubuntu-latest
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:

2
.gitignore vendored
View File

@@ -8,10 +8,10 @@ rclone.iml
.idea
.history
*.test
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
Thumbs.db
__pycache__
.DS_Store

View File

@@ -2,15 +2,17 @@
linters:
enable:
- deadcode
- errcheck
- goimports
- revive
- ineffassign
- structcheck
- varcheck
- govet
- unconvert
- staticcheck
- gosimple
- stylecheck
- unused
- misspell
#- prealloc
#- maligned
disable-all: true
@@ -25,6 +27,74 @@ issues:
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
exclude-rules:
- linters:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
# don't disable the revive messages about comments on exported functions
include:
- EXC0012
- EXC0013
- EXC0014
- EXC0015
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m
linters-settings:
revive:
# setting rules seems to disable all the rules, so re-enable them here
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
- name: empty-block
disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
- name: increment-decrement
disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]

View File

@@ -419,7 +419,7 @@ remote or an fs.
Research
* Look at the interfaces defined in `fs/fs.go`
* Look at the interfaces defined in `fs/types.go`
* Study one or more of the existing remotes
Getting going
@@ -428,14 +428,19 @@ Getting going
* box is a good one to start from if you have a directory-based remote
* b2 is a good one to start from if you have a bucket-based remote
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `rclone purge -v TestRemote:rclone-info`
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
* open `remote.csv` in a spreadsheet and examine
Important:
* Please use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend. It makes maintenance much easier.
* If your backend is HTTP based then please use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
Unit tests
* Create a config entry called `TestRemote` for the unit tests to use
@@ -469,7 +474,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
* `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* make sure this has the `autogenerated options` comments in (see your reference backend docs)
* update them with `make backenddocs` - revert any changes in other backends
* update them in your backend with `bin/make_backend_docs.py remote`
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/_index.md` - front page of rclone.org

View File

@@ -16,6 +16,11 @@ Current active maintainers of rclone are:
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |
**This is a work in progress Draft**

33126
MANUAL.html generated

File diff suppressed because it is too large

9592
MANUAL.md generated

File diff suppressed because it is too large

34157
MANUAL.txt generated

File diff suppressed because it is too large

View File

@@ -96,7 +96,7 @@ build_dep:
# Get the release dependencies we only install on linux
release_dep_linux:
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
# Get the release dependencies we only install on Windows
release_dep_windows:

View File

@@ -25,18 +25,19 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
@@ -50,6 +51,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
@@ -60,16 +62,21 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
@@ -80,6 +87,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)

View File

@@ -90,6 +90,28 @@ Now
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push
## Update the website between releases
Create an update website branch based off the last release
git co -b update-website
If the branch already exists, double check there are no commits that need saving.
Now reset the branch to the last release
git reset --hard v1.64.0
Create the changes, check them in, test with `make serve` then
make upload_test_website
Check out https://test.rclone.org and when happy
make upload_website
Cherry pick any changes back to master and the stable branch if it is active.
## Making a manual build of docker
The rclone docker image should autobuild on via GitHub actions. If it doesn't

View File

@@ -1 +1 @@
v1.62.1
v1.64.1

View File

@@ -36,9 +36,12 @@ import (
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/quatrix"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"

File diff suppressed because it is too large

View File

@@ -1,5 +1,5 @@
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
@@ -20,17 +20,18 @@ func (f *Fs) InternalTest(t *testing.T) {
func TestIncrement(t *testing.T) {
for _, test := range []struct {
in []byte
want []byte
in [8]byte
want [8]byte
}{
{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
{[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}},
{[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}},
{[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}},
{[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
} {
increment(test.in)
increment(&test.in)
assert.Equal(t, test.want, test.in)
}
}

View File

@@ -1,7 +1,7 @@
// Test AzureBlob filesystem interface
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
@@ -9,6 +9,7 @@ import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
@@ -25,6 +26,25 @@ func TestIntegration(t *testing.T) {
})
}
// TestIntegration2 runs integration tests against the remote
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

View File

@@ -1,7 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js || !go1.18
// +build plan9 solaris js !go1.18
//go:build plan9 || solaris || js
// +build plan9 solaris js
package azureblob

View File

@@ -32,6 +32,7 @@ import (
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"
@@ -57,9 +58,7 @@ const (
minChunkSize = 5 * fs.Mebi
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
)
// Globals
@@ -75,13 +74,15 @@ func init() {
Description: "Backblaze B2",
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID.",
Required: true,
Name: "account",
Help: "Account ID or Application Key ID.",
Required: true,
Sensitive: true,
}, {
Name: "key",
Help: "Application Key.",
Required: true,
Name: "key",
Help: "Application Key.",
Required: true,
Sensitive: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -147,6 +148,18 @@ might a maximum of "--transfers" chunks in progress at once.
5,000,000 Bytes is the minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
Note that chunks are stored in memory and there may be up to
"--transfers" * "--b2-upload-concurrency" chunks stored at once
in memory.`,
Default: 4,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files.
@@ -186,16 +199,16 @@ The minimum value is 1 second. The maximum value is one week.`,
Advanced: true,
}, {
Name: "memory_pool_flush_time",
Default: memoryPoolFlushTime,
Default: fs.Duration(time.Minute),
Advanced: true,
Help: `How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
Hide: fs.OptionHideBoth,
Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
}, {
Name: "memory_pool_use_mmap",
Default: memoryPoolUseMmap,
Default: false,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
Hide: fs.OptionHideBoth,
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -222,11 +235,10 @@ type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -251,7 +263,6 @@ type Fs struct {
authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
}
// Object describes a b2 object
@@ -456,12 +467,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
ci.Transfers,
opt.MemoryPoolUseMmap,
),
}
f.setRoot(root)
f.features = (&fs.Features{
@@ -595,23 +600,24 @@ func (f *Fs) clearUploadURL(bucketID string) {
f.uploadMu.Unlock()
}
// getBuf gets a buffer of f.opt.ChunkSize and an upload token
// getRW gets a RW buffer and an upload token
//
// If noBuf is set then it just gets an upload token
func (f *Fs) getBuf(noBuf bool) (buf []byte) {
func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
f.uploadToken.Get()
if !noBuf {
buf = f.pool.Get()
rw = multipart.NewRW()
}
return buf
return rw
}
// putBuf returns a buffer to the memory pool and an upload token
// putRW returns a RW buffer to the memory pool and returns an upload
// token
//
// If noBuf is set then it just returns the upload token
func (f *Fs) putBuf(buf []byte, noBuf bool) {
if !noBuf {
f.pool.Put(buf)
// If buf is nil then it just returns the upload token
func (f *Fs) putRW(rw *pool.RW) {
if rw != nil {
_ = rw.Close()
}
f.uploadToken.Put()
}
@@ -1291,7 +1297,11 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
if err != nil {
return err
}
return up.Upload(ctx)
err = up.Copy(ctx)
if err != nil {
return err
}
return dstObj.decodeMetaDataFileInfo(up.info)
}
dstBucket, dstPath := dstObj.split()
@@ -1420,7 +1430,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
if err != nil {
return "", err
}
absPath := "/" + bucketPath
absPath := "/" + urlEncode(bucketPath)
link = RootURL + "/file/" + urlEncode(bucket) + absPath
bucketType, err := f.getbucketType(ctx, bucket)
if err != nil {
@@ -1859,11 +1869,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
if size == -1 {
if size < 0 {
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
buf := o.fs.getBuf(false)
rw := o.fs.getRW(false)
n, err := io.ReadFull(in, buf)
n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize))
if err == nil {
bufReader := bufio.NewReader(in)
in = bufReader
@@ -1874,26 +1884,34 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
if err != nil {
o.fs.putBuf(buf, false)
o.fs.putRW(rw)
return err
}
// NB Stream returns the buffer and token
return up.Stream(ctx, buf)
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
err = up.Stream(ctx, rw)
if err != nil {
return err
}
return o.decodeMetaDataFileInfo(up.info)
} else if err == io.EOF {
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
defer o.fs.putBuf(buf, false)
size = int64(n)
in = bytes.NewReader(buf[:n])
defer o.fs.putRW(rw)
size = n
in = rw
} else {
o.fs.putBuf(buf, false)
o.fs.putRW(rw)
return err
}
} else if size > int64(o.fs.opt.UploadCutoff) {
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
if err != nil {
return err
}
return up.Upload(ctx)
up := chunkWriter.(*largeUpload)
return o.decodeMetaDataFileInfo(up.info)
}
modTime := src.ModTime(ctx)
@@ -2001,6 +2019,41 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.decodeMetaDataFileInfo(&response)
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// FIXME what if file is smaller than 1 chunk?
if f.opt.Versions {
return info, nil, errNotWithVersions
}
if f.opt.VersionAt.IsSet() {
return info, nil, errNotWithVersionAt
}
//size := src.Size()
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
bucket, _ := o.split()
err = f.makeBucket(ctx, bucket)
if err != nil {
return info, nil, err
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(f.opt.ChunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
return info, up, err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split()
@@ -2028,14 +2081,15 @@ func (o *Object) ID() string {
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)

View File

@@ -1,10 +1,19 @@
package b2
import (
"bytes"
"context"
"fmt"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test b2 string encoding
@@ -168,3 +177,100 @@ func TestParseTimeString(t *testing.T) {
}
}
// The integration tests do a reasonable job of testing the normal
// copy but don't test the chunked copy.
func (f *Fs) InternalTestChunkedCopy(t *testing.T) {
ctx := context.Background()
contents := random.String(8 * 1024 * 1024)
item := fstest.NewItem("chunked-copy", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
src := fstests.PutTestContents(ctx, t, f, &item, contents, true)
defer func() {
assert.NoError(t, src.Remove(ctx))
}()
var itemCopy = item
itemCopy.Path += ".copy"
// Set copy cutoff to mininum value so we make chunks
origCutoff := f.opt.CopyCutoff
f.opt.CopyCutoff = minChunkSize
defer func() {
f.opt.CopyCutoff = origCutoff
}()
// Do the copy
dst, err := f.Copy(ctx, src, itemCopy.Path)
require.NoError(t, err)
defer func() {
assert.NoError(t, dst.Remove(ctx))
}()
// Check size
assert.Equal(t, src.Size(), dst.Size())
// Check modtime
srcModTime := src.ModTime(ctx)
dstModTime := dst.ModTime(ctx)
assert.True(t, srcModTime.Equal(dstModTime))
// Make sure contents are correct
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents)
}
// The integration tests do a reasonable job of testing the normal
// streaming upload but don't test the chunked streaming upload.
func (f *Fs) InternalTestChunkedStreamingUpload(t *testing.T, size int) {
ctx := context.Background()
contents := random.String(size)
item := fstest.NewItem(fmt.Sprintf("chunked-streaming-upload-%d", size), contents, fstest.Time("2001-05-06T04:05:06.499Z"))
// Set chunk size to mininum value so we make chunks
origOpt := f.opt
f.opt.ChunkSize = minChunkSize
f.opt.UploadCutoff = 0
defer func() {
f.opt = origOpt
}()
// Do the streaming upload
src := object.NewStaticObjectInfo(item.Path, item.ModTime, -1, true, item.Hashes, f)
in := bytes.NewBufferString(contents)
dst, err := f.PutStream(ctx, in, src)
require.NoError(t, err)
defer func() {
assert.NoError(t, dst.Remove(ctx))
}()
// Check size
assert.Equal(t, int64(size), dst.Size())
// Check modtime
srcModTime := src.ModTime(ctx)
dstModTime := dst.ModTime(ctx)
assert.Equal(t, srcModTime, dstModTime)
// Make sure contents are correct
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents, "Contents incorrect")
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
t.Run("ChunkedCopy", f.InternalTestChunkedCopy)
for _, size := range []fs.SizeSuffix{
minChunkSize - 1,
minChunkSize,
minChunkSize + 1,
(3 * minChunkSize) / 2,
(5 * minChunkSize) / 2,
} {
t.Run(fmt.Sprintf("ChunkedStreamingUpload/%d", size), func(t *testing.T) {
f.InternalTestChunkedStreamingUpload(t, int(size))
})
}
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -5,7 +5,6 @@
package b2
import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
@@ -14,7 +13,6 @@ import (
"io"
"strings"
"sync"
"time"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
@@ -80,12 +78,14 @@ type largeUpload struct {
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int64 // calculated number of parts, if known
parts int // calculated number of parts, if known
sha1smu sync.Mutex // mutex to protect sha1s
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
chunkSize int64 // chunk size to use
src *Object // if copying, object we are reading from
info *api.FileInfo // final response with info about the object
}
// newLargeUpload starts an upload of object o from in with metadata in src
@@ -93,18 +93,16 @@ type largeUpload struct {
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
size := src.Size()
parts := int64(0)
sha1SliceSize := int64(maxParts)
parts := 0
chunkSize := defaultChunkSize
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
parts = size / int64(chunkSize)
parts = int(size / int64(chunkSize))
if size%int64(chunkSize) != 0 {
parts++
}
sha1SliceSize = parts
}
opts := rest.Opts{
@@ -152,7 +150,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, sha1SliceSize),
sha1s: make([]string, 0, 16),
chunkSize: int64(chunkSize),
}
// unwrap the accounting from the input, we use wrap to put it
@@ -171,24 +169,26 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
} else {
if len(up.uploads) > 0 {
upload, up.uploads = up.uploads[0], up.uploads[1:]
up.uploadMu.Unlock()
return upload, nil
}
up.uploadMu.Unlock()
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err = up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
return upload, nil
}
@@ -203,10 +203,39 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock()
}
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Add an sha1 to the slice of sha1s being built up
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
up.sha1smu.Lock()
defer up.sha1smu.Unlock()
if len(up.sha1s) < chunkNumber+1 {
up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
}
up.sha1s[chunkNumber] = sha1
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(1)
}
err = up.f.pacer.Call(func() (bool, error) {
// Discover the size by seeking to the end
size, err = reader.Seek(0, io.SeekEnd)
if err != nil {
return false, err
}
// rewind the reader on retry and after reading size
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
// Get upload URL
upload, err := up.getUploadURL(ctx)
@@ -214,8 +243,8 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
return false, err
}
in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
size := int64(len(body)) + int64(in.AdditionalLength())
in := newHashAppendingReader(reader, sha1.New())
sizeWithHash := size + int64(in.AdditionalLength())
// Authorization
//
@@ -245,10 +274,10 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
Body: up.wrap(in),
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
sha1Header: "hex_digits_at_end",
},
ContentLength: &size,
ContentLength: &sizeWithHash,
}
var response api.UploadPartResponse
@@ -256,7 +285,7 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
}
// On retryable error clear PartUploadURL
if retry {
@@ -264,30 +293,30 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
upload = nil
}
up.returnUploadURL(upload)
up.sha1s[part-1] = in.HexSum()
up.addSha1(chunkNumber, in.HexSum())
return retry, err
})
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
} else {
fs.Debugf(up.o, "Done sending chunk %d", part)
fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
}
return err
return size, err
}
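WriteChunk discovers the chunk length by seeking the supplied io.ReadSeeker to the end and then rewinding it, which also makes retries safe because the reader can be replayed from the start inside the pacer callback. A minimal sketch of that pattern, independent of the backend types:

// Sketch only: measure an io.ReadSeeker and rewind it so it can be
// read (or retried) from the beginning, mirroring the two Seek calls above.
package main

import (
	"fmt"
	"io"
	"strings"
)

func sizeAndRewind(r io.ReadSeeker) (int64, error) {
	size, err := r.Seek(0, io.SeekEnd)
	if err != nil {
		return 0, err
	}
	// rewind so the caller (or a retry) reads from the start
	if _, err := r.Seek(0, io.SeekStart); err != nil {
		return 0, err
	}
	return size, nil
}

func main() {
	r := strings.NewReader("hello world")
	size, err := sizeAndRewind(r)
	fmt.Println(size, err) // 11 <nil>
}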
// Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_part",
}
offset := (part - 1) * up.chunkSize // where we are in the source file
offset := int64(part) * up.chunkSize // where we are in the source file
var request = api.CopyPartRequest{
SourceID: up.src.id,
LargeFileID: up.id,
PartNumber: part,
PartNumber: int64(part + 1),
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
}
var response api.UploadPartResponse
@@ -296,7 +325,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64
if err != nil {
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
up.sha1s[part-1] = response.SHA1
up.addSha1(part, response.SHA1)
return retry, err
})
if err != nil {
@@ -307,8 +336,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64
return err
}
// finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error {
// Close closes off the large upload
func (up *largeUpload) Close(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
opts := rest.Opts{
Method: "POST",
@@ -326,11 +355,12 @@ func (up *largeUpload) finish(ctx context.Context) error {
if err != nil {
return err
}
return up.o.decodeMetaDataFileInfo(&response)
up.info = &response
return nil
}
// cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error {
// Abort aborts the large upload
func (up *largeUpload) Abort(ctx context.Context) error {
fs.Debugf(up.o, "Cancelling large file %s", up.what)
opts := rest.Opts{
Method: "POST",
@@ -355,157 +385,99 @@ func (up *largeUpload) cancel(ctx context.Context) error {
// reaches EOF.
//
// Note that initialUploadBlock must be returned to f.putBuf()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
hasMoreParts = true
)
up.size = int64(len(initialUploadBlock))
g.Go(func() error {
for part := int64(1); hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var buf []byte
if part == 1 {
buf = initialUploadBlock
} else {
buf = up.f.getBuf(false)
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, false)
return nil
}
// Read the chunk
var n int
if part == 1 {
n = len(buf)
} else {
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putBuf(buf, false)
return nil
} else if err != nil {
// other kinds of errors indicate failure
up.f.putBuf(buf, false)
return err
}
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, false)
return up.transferChunk(gCtx, part, buf)
})
up.size = initialUploadBlock.Size()
up.parts = 0
for part := 0; hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var rw *pool.RW
if part == 0 {
rw = initialUploadBlock
} else {
rw = up.f.getRW(false)
}
return nil
})
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putRW(rw)
break
}
// Read the chunk
var n int64
if part == 0 {
n = rw.Size()
} else {
n, err = io.CopyN(rw, up.in, up.chunkSize)
if err == io.EOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
hasMoreParts = false
} else if err != nil {
// other kinds of errors indicate failure
up.f.putRW(rw)
return err
}
}
// Keep stats up to date
up.parts += 1
up.size += n
if part > maxParts {
up.f.putRW(rw)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putRW(rw)
_, err = up.WriteChunk(gCtx, part, rw)
return err
})
}
err = g.Wait()
if err != nil {
return err
}
up.sha1s = up.sha1s[:up.parts]
return up.finish(ctx)
return up.Close(ctx)
}
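The rewritten Stream loop reads chunks sequentially from the input but uploads them concurrently under an errgroup, and stops producing chunks as soon as the group context is cancelled by a failed worker. A stripped-down sketch of that shape, with the real per-chunk upload replaced by a hypothetical uploadPart stand-in and plain bytes.Buffer chunks instead of the pool.RW buffers used above:

// Sketch only: sequential reads feeding concurrent workers via errgroup.
package main

import (
	"bytes"
	"context"
	"fmt"
	"io"

	"golang.org/x/sync/errgroup"
)

// uploadPart is a stand-in for the real per-chunk upload.
func uploadPart(ctx context.Context, part int, data []byte) error {
	fmt.Printf("part %d: %d bytes\n", part, len(data))
	return nil
}

func stream(ctx context.Context, in io.Reader, chunkSize int64) error {
	g, gCtx := errgroup.WithContext(ctx)
	for part := 0; ; part++ {
		if gCtx.Err() != nil {
			break // a worker failed; stop producing chunks
		}
		buf := &bytes.Buffer{}
		n, err := io.CopyN(buf, in, chunkSize)
		if n == 0 && err == io.EOF {
			break // nothing left; previous chunk was the last
		}
		if err != nil && err != io.EOF {
			return err
		}
		part := part // for the closure
		g.Go(func() error { return uploadPart(gCtx, part, buf.Bytes()) })
		if err == io.EOF {
			break // short read means this was the last chunk
		}
	}
	return g.Wait()
}

func main() {
	_ = stream(context.Background(), bytes.NewBufferString("0123456789abcdef"), 5)
}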
// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
// Copy the chunks from the source to the destination
func (up *largeUpload) Copy(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
uploadPool *pool.Pool
ci = fs.GetConfig(ctx)
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
)
// If using large chunk size then make a temporary pool
if up.chunkSize <= int64(up.f.opt.ChunkSize) {
uploadPool = up.f.pool
} else {
uploadPool = pool.New(
time.Duration(up.f.opt.MemoryPoolFlushTime),
int(up.chunkSize),
ci.Transfers,
up.f.opt.MemoryPoolUseMmap,
)
defer uploadPool.Flush()
}
// Get an upload token and a buffer
getBuf := func() (buf []byte) {
up.f.getBuf(true)
if !up.doCopy {
buf = uploadPool.Get()
g.SetLimit(up.f.opt.UploadConcurrency)
for part := 0; part < up.parts; part++ {
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in copying all the other parts.
if gCtx.Err() != nil {
break
}
return buf
}
// Put an upload token and a buffer
putBuf := func(buf []byte) {
if !up.doCopy {
uploadPool.Put(buf)
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
up.f.putBuf(nil, true)
part := part // for the closure
g.Go(func() (err error) {
return up.copyChunk(gCtx, part, reqSize)
})
remaining -= reqSize
}
g.Go(func() error {
for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := getBuf()
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
putBuf(buf)
return nil
}
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
if !up.doCopy {
// Read the chunk
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
putBuf(buf)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer putBuf(buf)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
err = up.copyChunk(gCtx, part, reqSize)
}
return err
})
remaining -= reqSize
}
return nil
})
err = g.Wait()
if err != nil {
return err
}
return up.finish(ctx)
return up.Close(ctx)
}
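Both WriteChunk and copyChunk now count chunks from zero internally, while B2 itself expects part numbers starting at one, so the wire value becomes chunkNumber+1 and the source offset for a server-side copy becomes chunkNumber*chunkSize. A tiny sketch of that mapping (the chunk size is illustrative only):

// Sketch only: mapping a 0-based chunk index to the 1-based part number
// B2 expects, plus the byte range a server-side copy of that chunk covers.
package main

import "fmt"

func main() {
	const chunkSize = int64(5 * 1024 * 1024) // hypothetical chunk size
	for chunkNumber := 0; chunkNumber < 3; chunkNumber++ {
		partNumber := chunkNumber + 1            // what goes on the wire
		offset := int64(chunkNumber) * chunkSize // where this chunk starts in the source
		fmt.Printf("chunk %d -> part %d, bytes=%d-%d\n",
			chunkNumber, partNumber, offset, offset+chunkSize-1)
	}
}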

View File

@@ -52,7 +52,7 @@ func (e *Error) Error() string {
out += ": " + e.Message
}
if e.ContextInfo != nil {
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
out += fmt.Sprintf(" (%s)", string(e.ContextInfo))
}
return out
}
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
// Types of things in Item
// Types of things in Item/ItemMini
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
@@ -72,20 +72,31 @@ const (
ItemStatusDeleted = "deleted"
)
// ItemMini is a subset of the elements in a full Item returned by some API calls
type ItemMini struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
}
// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
Parent ItemMini `json:"parent"`
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
@@ -281,3 +292,30 @@ type User struct {
Address string `json:"address"`
AvatarURL string `json:"avatar_url"`
}
// FileTreeChangeEventTypes are the events that can require cache invalidation
var FileTreeChangeEventTypes = map[string]struct{}{
"ITEM_COPY": {},
"ITEM_CREATE": {},
"ITEM_MAKE_CURRENT_VERSION": {},
"ITEM_MODIFY": {},
"ITEM_MOVE": {},
"ITEM_RENAME": {},
"ITEM_TRASH": {},
"ITEM_UNDELETE_VIA_TRASH": {},
"ITEM_UPLOAD": {},
}
// Event is an array element in the response returned from /events
type Event struct {
EventType string `json:"event_type"`
EventID string `json:"event_id"`
Source Item `json:"source"`
}
// Events is returned from /events
type Events struct {
ChunkSize int64 `json:"chunk_size"`
Entries []Event `json:"entries"`
NextStreamPosition int64 `json:"next_stream_position"`
}
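ItemMini and Item now decode sequence_id with the `,string` tag option because the value arrives as a quoted string (the old Item field was a plain string); encoding/json then parses it straight into an int64 so sequence IDs can be compared numerically. A small standalone example of that tag, using a stand-in struct:

// Sketch only: the `,string` struct tag option decodes a JSON string
// such as "3" directly into an int64 field.
package main

import (
	"encoding/json"
	"fmt"
)

type itemMini struct {
	ID         string `json:"id"`
	SequenceID int64  `json:"sequence_id,string"`
}

func main() {
	var item itemMini
	err := json.Unmarshal([]byte(`{"id":"12345","sequence_id":"3"}`), &item)
	fmt.Println(item.SequenceID, err) // 3 <nil>
}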

View File

@@ -27,6 +27,7 @@ import (
"sync/atomic"
"time"
"github.com/golang-jwt/jwt/v4"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -45,7 +46,6 @@ import (
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
)
const (
@@ -76,6 +76,11 @@ var (
}
)
type boxCustomClaims struct {
jwt.StandardClaims
BoxSubType string `json:"box_sub_type,omitempty"`
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -102,16 +107,18 @@ func init() {
return nil, nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
Advanced: true,
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
Advanced: true,
Sensitive: true,
}, {
Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
Sensitive: true,
}, {
Name: "box_sub_type",
Default: "user",
@@ -142,6 +149,23 @@ func init() {
Default: "",
Help: "Only show items owned by the login (email address) passed in.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user ID when using a service account.
Setting this flag allows rclone, when using a JWT service account, to
act on behalf of another user by setting the as-user header.
The user ID is the Box identifier for a user. User IDs can be found for
any user via the GET /users endpoint, which is only available to
admins, or by calling the GET /users/me endpoint with an authenticated
user session.
See: https://developer.box.com/guides/authentication/jwt/as-user/
`,
Advanced: true,
Sensitive: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -178,7 +202,7 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(ctx)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client)
return err
}
@@ -194,34 +218,31 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
return boxConfig, nil
}
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
}
claims = &jws.ClaimSet{
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
"aud": tokenURL,
"jti": val,
claims = &boxCustomClaims{
//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
StandardClaims: jwt.StandardClaims{
Id: val,
Issuer: boxConfig.BoxAppSettings.ClientID,
Subject: boxConfig.EnterpriseID,
Audience: tokenURL,
ExpiresAt: time.Now().Add(time.Second * 45).Unix(),
},
BoxSubType: boxSubType,
}
return claims, nil
}
func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
signingHeaders := &jws.Header{
Algorithm: "RS256",
Typ: "JWT",
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
signingHeaders := map[string]interface{}{
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
}
return signingHeaders
}
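The switch from golang.org/x/oauth2/jws to github.com/golang-jwt/jwt/v4 means the claims are now a struct embedding jwt.StandardClaims plus the extra box_sub_type field, and the kid signing header is passed as a plain map. A hedged sketch of how claims of that shape would typically be signed with that library; the key, IDs and URL below are placeholders, not rclone's values, and the real token exchange is elided:

// Sketch only (not rclone's code): signing a JWT whose claims embed
// jwt.StandardClaims plus a custom box_sub_type field.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

type customClaims struct {
	jwt.StandardClaims
	BoxSubType string `json:"box_sub_type,omitempty"`
}

func main() {
	key, _ := rsa.GenerateKey(rand.Reader, 2048) // demo key only
	claims := customClaims{
		StandardClaims: jwt.StandardClaims{
			Id:        "random-jti",
			Issuer:    "client-id",
			Subject:   "enterprise-id",
			Audience:  "https://example.com/oauth2/token", // placeholder token URL
			ExpiresAt: time.Now().Add(45 * time.Second).Unix(),
		},
		BoxSubType: "enterprise",
	}
	token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
	token.Header["kid"] = "public-key-id" // the signing header built above
	signed, err := token.SignedString(key)
	fmt.Println(len(signed) > 0, err)
}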
@@ -258,19 +279,29 @@ type Options struct {
AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"`
Impersonate string `config:"impersonate"`
}
// ItemMeta defines metadata we cache for each Item ID
type ItemMeta struct {
SequenceID int64 // the most recent event processed for this item
ParentID string // ID of the parent directory of this item
Name string // leaf name of this item
}
// Fs represents a remote box
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
itemMetaCacheMu *sync.Mutex // protects itemMetaCache
itemMetaCache map[string]ItemMeta // map of Item ID to selected metadata
}
// Object describes a box object
@@ -418,12 +449,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
itemMetaCacheMu: new(sync.Mutex),
itemMetaCache: make(map[string]ItemMeta),
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -436,6 +469,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
}
// If using impersonate set an as-user header
if f.opt.Impersonate != "" {
f.srv.SetHeader("as-user", f.opt.Impersonate)
}
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
@@ -678,6 +716,17 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = append(entries, o)
}
// Cache some metadata for this Item to help us process events later
// on. In particular, the box event API does not provide the old path
// of the Item when it is renamed/deleted/moved/etc.
f.itemMetaCacheMu.Lock()
cachedItemMeta, found := f.itemMetaCache[info.ID]
if !found || cachedItemMeta.SequenceID < info.SequenceID {
f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
}
f.itemMetaCacheMu.Unlock()
return false
})
if err != nil {
@@ -1117,7 +1166,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
var (
deleteErrors = int64(0)
deleteErrors atomic.Uint64
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
wg sync.WaitGroup
)
@@ -1133,7 +1182,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
atomic.AddInt64(&deleteErrors, 1)
deleteErrors.Add(1)
}
}()
} else {
@@ -1142,12 +1191,250 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return false
})
wg.Wait()
if deleteErrors != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors)
if deleteErrors.Load() != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load())
}
return err
}
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the `stream_position` early so all changes from now on get processed
streamPosition, err := f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
}
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
if pollInterval != 0 {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
if streamPosition == "" {
streamPosition, err = f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
continue
}
}
streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
}
}
}()
}
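ChangeNotify keeps polling on a ticker whose interval can be changed, disabled (interval 0), or shut down (channel closed) at runtime through pollIntervalChan, stopping and recreating the ticker rather than leaking it. A compact sketch of that select/ticker shape with a stubbed poll step; the names here are illustrative:

// Sketch only: a poll loop whose interval is controlled by a channel.
// Sending 0 disables polling; closing the channel stops the loop.
package main

import (
	"fmt"
	"time"
)

func pollLoop(intervals <-chan time.Duration, poll func()) {
	var ticker *time.Ticker
	var tickerC <-chan time.Time // nil channel: its case never fires
	for {
		select {
		case interval, ok := <-intervals:
			if !ok {
				if ticker != nil {
					ticker.Stop()
				}
				return
			}
			if ticker != nil {
				ticker.Stop()
				ticker, tickerC = nil, nil
			}
			if interval != 0 {
				ticker = time.NewTicker(interval)
				tickerC = ticker.C
			}
		case <-tickerC:
			poll()
		}
	}
}

func main() {
	intervals := make(chan time.Duration)
	done := make(chan struct{})
	go func() { pollLoop(intervals, func() { fmt.Println("poll") }); close(done) }()
	intervals <- 10 * time.Millisecond
	time.Sleep(35 * time.Millisecond)
	close(intervals)
	<-done
}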
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/events",
Parameters: fieldsValue(),
}
opts.Parameters.Set("stream_position", "now")
opts.Parameters.Set("stream_type", "changes")
var result api.Events
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
}
return strconv.FormatInt(result.NextStreamPosition, 10), nil
}
// Attempts to construct the full path for an object, given the ID of its
// parent directory and the name of the object.
//
// Can return "" if the parentID is not currently in the directory cache.
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
fullPath = ""
name := f.opt.Enc.ToStandardName(childName)
if parentID != "" {
if parentDir, ok := f.dirCache.GetInv(parentID); ok {
if len(parentDir) > 0 {
fullPath = parentDir + "/" + name
} else {
fullPath = name
}
}
} else {
// No parent, this object is at the root
fullPath = name
}
return fullPath
}
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string) (nextStreamPosition string, err error) {
nextStreamPosition = streamPosition
// box can send duplicate Event IDs; filter any in a single notify run
processedEventIDs := make(map[string]bool)
for {
limit := f.opt.ListChunk
// box only allows a max of 500 events
if limit > 500 {
limit = 500
}
opts := rest.Opts{
Method: "GET",
Path: "/events",
Parameters: fieldsValue(),
}
opts.Parameters.Set("stream_position", nextStreamPosition)
opts.Parameters.Set("stream_type", "changes")
opts.Parameters.Set("limit", strconv.Itoa(limit))
var result api.Events
var resp *http.Response
fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
}
if result.ChunkSize != int64(len(result.Entries)) {
return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
}
nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
if result.ChunkSize == 0 {
return nextStreamPosition, nil
}
type pathToClear struct {
path string
entryType fs.EntryType
}
var pathsToClear []pathToClear
newEventIDs := 0
for _, entry := range result.Entries {
if entry.EventID == "" || processedEventIDs[entry.EventID] { // missing Event ID, or already saw this one
continue
}
processedEventIDs[entry.EventID] = true
newEventIDs++
if entry.Source.ID == "" { // missing File or Folder ID
continue
}
if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
continue
}
// Only interested in event types that result in a file tree change
if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
continue
}
f.itemMetaCacheMu.Lock()
itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
if cachedItemMetaFound {
if itemMeta.SequenceID >= entry.Source.SequenceID {
// Item in the cache has the same or newer SequenceID than
// this event. Ignore this event, it must be old.
f.itemMetaCacheMu.Unlock()
continue
}
// This event is newer. Delete its entry from the cache,
// we'll notify about its change below, then it's up to a
// future list operation to repopulate the cache.
delete(f.itemMetaCache, entry.Source.ID)
}
f.itemMetaCacheMu.Unlock()
entryType := fs.EntryDirectory
if entry.Source.Type == api.ItemTypeFile {
entryType = fs.EntryObject
}
// The box event only includes the new path for the object (e.g.
// the path after the object was moved). If there was an old path
// saved in our cache, it must be cleared.
if cachedItemMetaFound {
path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
if path != "" {
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
}
// If this is a directory, also delete it from the dir cache.
// This will effectively invalidate the item metadata cache
// entries for all descendants of this directory, since we
// will no longer be able to construct a full path for them.
// This is exactly what we want, since we don't want to notify
// on the paths of these descendants if one of their ancestors
// has been renamed/deleted.
if entry.Source.Type == api.ItemTypeFolder {
f.dirCache.FlushDir(path)
}
}
// If the item is "active", then it is not trashed or deleted, so
// it potentially has a valid parent.
//
// Construct the new path of the object, based on the Parent ID
// and its name. If we get an empty result, it means we don't
// currently know about this object so notification is unnecessary.
if entry.Source.ItemStatus == api.ItemStatusActive {
path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
if path != "" {
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
}
}
}
// box can sometimes repeatedly return the same Event IDs within a
// short period of time. If it stops giving us new ones, treat it
// the same as if it returned us none at all.
if newEventIDs == 0 {
return nextStreamPosition, nil
}
notifiedPaths := make(map[string]bool)
for _, p := range pathsToClear {
if _, ok := notifiedPaths[p.path]; ok {
continue
}
notifiedPaths[p.path] = true
notifyFunc(p.path, p.entryType)
}
fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
}
}
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {

View File

@@ -76,17 +76,19 @@ func init() {
Name: "plex_url",
Help: "The URL of the Plex server.",
}, {
Name: "plex_username",
Help: "The username of the Plex user.",
Name: "plex_username",
Help: "The username of the Plex user.",
Sensitive: true,
}, {
Name: "plex_password",
Help: "The password of the Plex user.",
IsPassword: true,
}, {
Name: "plex_token",
Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth,
Advanced: true,
Name: "plex_token",
Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth,
Advanced: true,
Sensitive: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server.",
@@ -1787,7 +1789,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
}
}
// StopBackgroundRunners will signall all the runners to stop their work
// StopBackgroundRunners will signal all the runners to stop their work
// can be triggered from a terminate signal or from testing between runs
func (f *Fs) StopBackgroundRunners() {
f.cleanupChan <- false

View File

@@ -1098,27 +1098,6 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
return l, err
}
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer func() {
_ = in.Close()
}()
out, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
_ = out.Close()
}()
_, err = io.Copy(out, in)
return err
}
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error

View File

@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})

View File

@@ -160,11 +160,11 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
minSize := 5242880
maxSize := 10485760
totalFiles := 10
rand.Seed(time.Now().Unix())
randInstance := rand.New(rand.NewSource(time.Now().Unix()))
lastFile := ""
for i := 0; i < totalFiles; i++ {
size := int64(rand.Intn(maxSize-minSize) + minSize)
size := int64(randInstance.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
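The excerpt above swaps the shared global random source for a locally seeded *rand.Rand, so the test gets its own generator instead of reseeding package state. A minimal sketch of that pattern:

// Sketch only: a locally seeded *rand.Rand so a test does not touch
// the global math/rand source.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	r := rand.New(rand.NewSource(time.Now().Unix()))
	minSize, maxSize := 5242880, 10485760
	size := r.Intn(maxSize-minSize) + minSize
	fmt.Println(size >= minSize && size < maxSize) // true
}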

View File

@@ -40,6 +40,7 @@ func TestIntegration(t *testing.T) {
UnimplementableFsMethods: []string{
"PublicLink",
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"UserInfo",

View File

@@ -1,4 +1,4 @@
// Package combine implents a backend to combine multiple remotes in a directory tree
// Package combine implements a backend to combine multiple remotes in a directory tree
package combine
/*
@@ -233,6 +233,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f)
canMove := true
for _, u := range f.upstreams {
@@ -289,6 +290,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}
// Enable CleanUp when any upstreams support it
if features.CleanUp == nil {
for _, u := range f.upstreams {
if u.f.Features().CleanUp != nil {
features.CleanUp = f.CleanUp
break
}
}
}
// Enable ChangeNotify when any upstreams support it
if features.ChangeNotify == nil {
for _, u := range f.upstreams {
@@ -299,6 +310,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}
// show that we wrap other backends
features.Overlay = true
f.features = features
// Get common intersection of hashes
@@ -351,7 +365,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
return g.Wait()
}
// join the elements together but unline path.Join return empty string
// join the elements together but unlike path.Join return empty string
func join(elem ...string) string {
result := path.Join(elem...)
if result == "." {
@@ -887,6 +901,100 @@ func (f *Fs) Shutdown(ctx context.Context) error {
})
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return "", err
}
do := u.f.Features().PublicLink
if do == nil {
return "", fs.ErrorNotImplemented
}
return do(ctx, uRemote, expire, unlink)
}
// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
u, uRemote, err := f.findUpstream(srcPath)
if err != nil {
return nil, err
}
do := u.f.Features().PutUnchecked
if do == nil {
return nil, fs.ErrorNotImplemented
}
uSrc := fs.NewOverrideRemote(src, uRemote)
return do(ctx, in, uSrc, options...)
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) == 0 {
return nil
}
var (
u *upstream
uDirs []fs.Directory
)
for _, dir := range dirs {
uNew, uDir, err := f.findUpstream(dir.Remote())
if err != nil {
return err
}
if u == nil {
u = uNew
} else if u != uNew {
return fmt.Errorf("can't merge directories from different upstreams")
}
uDirs = append(uDirs, fs.NewOverrideDirectory(dir, uDir))
}
do := u.f.Features().MergeDirs
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx, uDirs)
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().CleanUp; do != nil {
return do(ctx)
}
return nil
})
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := u.f.Features().OpenWriterAt
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx, uRemote, size)
}
// Object describes a wrapped Object
//
// This is a wrapped Object which knows its path prefix
@@ -916,7 +1024,7 @@ func (o *Object) String() string {
func (o *Object) Remote() string {
newPath, err := o.u.pathAdjustment.do(o.Object.String())
if err != nil {
fs.Errorf(o, "Bad object: %v", err)
fs.Errorf(o.Object, "Bad object: %v", err)
return err.Error()
}
return newPath
@@ -988,5 +1096,10 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil)
)
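The var block above is the usual compile-time check that *Fs really satisfies each optional interface it now claims (PublicLinker, PutUncheckeder, MergeDirser, CleanUpper, OpenWriterAter). A minimal example of the idiom with made-up types:

// Sketch only: assigning a typed nil to a blank interface-typed variable
// fails to compile if the type stops implementing the interface.
package main

type Greeter interface{ Greet() string }

type English struct{}

func (English) Greet() string { return "hello" }

// Compile-time assertion: *English implements Greeter.
var _ Greeter = (*English)(nil)

func main() {}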

View File

@@ -10,6 +10,11 @@ import (
"github.com/rclone/rclone/fstest/fstests"
)
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
unimplementableObjectMethods = []string{}
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
@@ -17,8 +22,8 @@ func TestIntegration(t *testing.T) {
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
@@ -35,7 +40,9 @@ func TestLocal(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
@@ -51,7 +58,9 @@ func TestMemory(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
@@ -68,6 +77,8 @@ func TestMixed(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}

View File

@@ -186,6 +186,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true
@@ -256,6 +257,16 @@ func isMetadataFile(filename string) bool {
return strings.HasSuffix(filename, metaFileExt)
}
// Checks whether a file is a metadata file and returns the original
// file name and a flag indicating whether it was a metadata file or
// not.
func unwrapMetadataFile(filename string) (string, bool) {
if !isMetadataFile(filename) {
return "", false
}
return filename[:len(filename)-len(metaFileExt)], true
}
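unwrapMetadataFile is a guarded suffix strip; on current Go (1.20+) the same check-and-cut can be written with strings.CutSuffix, shown here as a sketch. The extension below is hypothetical, and note one small behavioural difference: CutSuffix returns the input unchanged (rather than "") when the suffix is absent.

// Sketch only: equivalent suffix handling using strings.CutSuffix.
package main

import (
	"fmt"
	"strings"
)

const metaFileExt = ".example-meta" // hypothetical, for illustration only

func unwrap(filename string) (string, bool) {
	return strings.CutSuffix(filename, metaFileExt)
}

func main() {
	fmt.Println(unwrap("photo.jpg.example-meta")) // photo.jpg true
	fmt.Println(unwrap("photo.jpg"))              // photo.jpg false
}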
// makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) {
if mode != Uncompressed {
@@ -978,7 +989,8 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
fs.Logf(f, "path %q entryType %d", path, entryType)
var (
wrappedPath string
wrappedPath string
isMetadataFile bool
)
switch entryType {
case fs.EntryDirectory:
@@ -986,7 +998,10 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
case fs.EntryObject:
// Note: All we really need to do to monitor the object is to check whether the metadata changed,
// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
wrappedPath = makeMetadataName(path)
wrappedPath, isMetadataFile = unwrapMetadataFile(path)
if !isMetadataFile {
return
}
default:
fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
return

View File

@@ -14,23 +14,26 @@ import (
"github.com/rclone/rclone/fstest/fstests"
)
var defaultOpt = fstests.Opt{
RemoteName: "TestCompress:",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{},
}
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
fstests.Run(t, &defaultOpt)
}
// TestRemoteGzip tests GZIP compression
@@ -40,27 +43,13 @@ func TestRemoteGzip(t *testing.T) {
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
QuickTestOK: true,
})
opt := defaultOpt
opt.RemoteName = name + ":"
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
}
opt.QuickTestOK = true
fstests.Run(t, &opt)
}

View File

@@ -21,6 +21,7 @@ import (
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/version"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox"
@@ -37,7 +38,6 @@ const (
blockHeaderSize = secretbox.Overhead
blockDataSize = 64 * 1024
blockSize = blockHeaderSize + blockDataSize
encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
)
// Errors returned by cipher
@@ -53,8 +53,9 @@ var (
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
ErrorFileClosed = errors.New("file already closed")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix")
ErrorBadSeek = errors.New("Seek beyond end of file")
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
obfuscQuoteRune = '!'
)
@@ -169,27 +170,30 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
passBadBlocks bool // if set, pass bad blocks through as zeroed blocks
encryptedSuffix string
}
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
c := &Cipher{
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
encryptedSuffix: ".bin",
}
c.buffers.New = func() interface{} {
return make([]byte, blockSize)
return new([blockSize]byte)
}
err := c.Key(password, salt)
if err != nil {
@@ -198,11 +202,29 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
return c, nil
}
// setEncryptedSuffix sets the suffix, or an empty string if "none" is passed
func (c *Cipher) setEncryptedSuffix(suffix string) {
if strings.EqualFold(suffix, "none") {
c.encryptedSuffix = ""
return
}
if !strings.HasPrefix(suffix, ".") {
fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
suffix = "." + suffix
}
c.encryptedSuffix = suffix
}
// Call to set bad block pass through
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
c.passBadBlocks = passBadBlocks
}
// Key creates all the internal keys from the password passed in using
// scrypt.
//
// If salt is "" we use a fixed salt just to make attackers lives
// slighty harder than using no salt.
// slightly harder than using no salt.
//
// Note that empty password makes all 0x00 keys which is used in the
// tests.
@@ -230,15 +252,12 @@ func (c *Cipher) Key(password, salt string) (err error) {
}
// getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() []byte {
return c.buffers.Get().([]byte)
func (c *Cipher) getBlock() *[blockSize]byte {
return c.buffers.Get().(*[blockSize]byte)
}
// putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf []byte) {
if len(buf) != blockSize {
panic("bad blocksize returned to pool")
}
func (c *Cipher) putBlock(buf *[blockSize]byte) {
c.buffers.Put(buf)
}
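Switching the pool element from []byte to *[blockSize]byte is the standard sync.Pool advice: a pointer-to-array converts to interface{} without allocating a new slice header each time, and the fixed array type also removes the need for the length check that putBlock used to do. A minimal sketch of the pattern, with an illustrative fixed size:

// Sketch only: pooling fixed-size buffers as *[N]byte so putting them
// back into the pool does not allocate when boxing into interface{}.
package main

import (
	"fmt"
	"sync"
)

const blockSize = 64*1024 + 16 // illustrative fixed size

var buffers = sync.Pool{
	New: func() interface{} { return new([blockSize]byte) },
}

func main() {
	buf := buffers.Get().(*[blockSize]byte)
	// use (*buf)[:] wherever a []byte is needed
	b := (*buf)[:]
	fmt.Println(len(b))
	buffers.Put(buf)
}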
@@ -508,7 +527,7 @@ func (c *Cipher) encryptFileName(in string) string {
// EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff {
return in + encryptedSuffix
return in + c.encryptedSuffix
}
return c.encryptFileName(in)
}
@@ -568,8 +587,8 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
// DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
remainingLength := len(in) - len(c.encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) {
return "", ErrorNotAnEncryptedFile
}
decrypted := in[:remainingLength]
@@ -609,7 +628,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
// fromReader fills the nonce from an io.Reader - normally the OSes
// crypto random number generator
func (n *nonce) fromReader(in io.Reader) error {
read, err := io.ReadFull(in, (*n)[:])
read, err := readers.ReadFill(in, (*n)[:])
if read != fileNonceSize {
return fmt.Errorf("short read of nonce: %w", err)
}
@@ -664,8 +683,8 @@ type encrypter struct {
in io.Reader
c *Cipher
nonce nonce
buf []byte
readBuf []byte
buf *[blockSize]byte
readBuf *[blockSize]byte
bufIndex int
bufSize int
err error
@@ -690,9 +709,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
}
}
// Copy magic into buffer
copy(fh.buf, fileMagicBytes)
copy((*fh.buf)[:], fileMagicBytes)
// Copy nonce into buffer
copy(fh.buf[fileMagicSize:], fh.nonce[:])
copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
return fh, nil
}
@@ -707,22 +726,20 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
if fh.bufIndex >= fh.bufSize {
// Read data
// FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := fh.readBuf[:blockDataSize]
n, err = io.ReadFull(fh.in, readBuf)
readBuf := (*fh.readBuf)[:blockDataSize]
n, err = readers.ReadFill(fh.in, readBuf)
if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return fh.finish(err)
}
// possibly err != nil here, but we will process the
// data and the next call to ReadFull will return 0, err
// data and the next call to ReadFill will return 0, err
// Encrypt the block using the nonce
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n
fh.nonce.increment()
}
n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
fh.bufIndex += n
return n, nil
}
@@ -763,8 +780,8 @@ type decrypter struct {
nonce nonce
initialNonce nonce
c *Cipher
buf []byte
readBuf []byte
buf *[blockSize]byte
readBuf *[blockSize]byte
bufIndex int
bufSize int
err error
@@ -782,12 +799,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
limit: -1,
}
// Read file header (magic + nonce)
readBuf := fh.readBuf[:fileHeaderSize]
_, err := io.ReadFull(fh.rc, readBuf)
if err == io.EOF || err == io.ErrUnexpectedEOF {
readBuf := (*fh.readBuf)[:fileHeaderSize]
n, err := readers.ReadFill(fh.rc, readBuf)
if n < fileHeaderSize && err == io.EOF {
// This read from 0..fileHeaderSize-1 bytes
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
} else if err != nil {
} else if err != io.EOF && err != nil {
return nil, fh.finishAndClose(err)
}
// check the magic
@@ -845,10 +862,8 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
func (fh *decrypter) fillBuffer() (err error) {
// FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := fh.readBuf
n, err := io.ReadFull(fh.rc, readBuf)
n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return err
}
// possibly err != nil here, but we will process the data and
@@ -856,18 +871,25 @@ func (fh *decrypter) fillBuffer() (err error) {
// Check header + 1 byte exists
if n <= blockHeaderSize {
if err != nil {
if err != nil && err != io.EOF {
return err // return pending error as it is likely more accurate
}
return ErrorEncryptedFileBadHeader
}
// Decrypt the block using the nonce
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok {
if err != nil {
if err != nil && err != io.EOF {
return err // return pending error as it is likely more accurate
}
return ErrorEncryptedBadBlock
if !fh.c.passBadBlocks {
return ErrorEncryptedBadBlock
}
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
// Zero out the bad block and continue
for i := range (*fh.buf)[:n] {
(*fh.buf)[i] = 0
}
}
fh.bufIndex = 0
fh.bufSize = n - blockHeaderSize
@@ -893,7 +915,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
if fh.limit >= 0 && fh.limit < int64(toCopy) {
toCopy = int(fh.limit)
}
n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
fh.bufIndex += n
if fh.limit >= 0 {
fh.limit -= int64(n)
@@ -904,9 +926,8 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
return n, nil
}
// calculateUnderlying converts an (offset, limit) in a crypted file
// into an (underlyingOffset, underlyingLimit) for the underlying
// file.
// calculateUnderlying converts an (offset, limit) in an encrypted file
// into an (underlyingOffset, underlyingLimit) for the underlying file.
//
// It also returns number of bytes to discard after reading the first
// block and number of blocks this is from the start so the nonce can

View File

@@ -27,14 +27,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
{"off", NameEncryptionOff, ""},
{"standard", NameEncryptionStandard, ""},
{"obfuscate", NameEncryptionObfuscated, ""},
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""},
} {
actual, actualErr := NewNameEncryptionMode(test.in)
assert.Equal(t, actual, test.expected)
if test.expectedErr == "" {
assert.NoError(t, actualErr)
} else {
assert.Error(t, actualErr, test.expectedErr)
assert.EqualError(t, actualErr, test.expectedErr)
}
}
}
@@ -405,6 +405,13 @@ func TestNonStandardEncryptFileName(t *testing.T) {
// Off mode
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
// Off mode with custom suffix
c, _ = newCipher(NameEncryptionOff, "", "", true, nil)
c.setEncryptedSuffix(".jpg")
assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123"))
// Off mode with empty suffix
c.setEncryptedSuffix("none")
assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123"))
// Obfuscation mode
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
@@ -483,21 +490,27 @@ func TestNonStandardDecryptFileName(t *testing.T) {
in string
expected string
expectedErr error
customSuffix string
}{
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""},
{NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"},
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
if test.customSuffix != "" {
c.setEncryptedSuffix(test.customSuffix)
}
actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
@@ -726,7 +739,7 @@ func TestNonceFromReader(t *testing.T) {
assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
buf = bytes.NewBufferString("123456789abcdefghijklmn")
err = x.fromReader(buf)
assert.Error(t, err, "short read of nonce")
assert.EqualError(t, err, "short read of nonce: EOF")
}
func TestNonceFromBuf(t *testing.T) {
@@ -1050,7 +1063,7 @@ func TestRandomSource(t *testing.T) {
_, _ = source.Read(buf)
sink = newRandomSource(1e8)
_, err = io.Copy(sink, source)
assert.Error(t, err, "Error in stream")
assert.EqualError(t, err, "Error in stream at 1")
}
type zeroes struct{}
@@ -1167,13 +1180,13 @@ func TestNewEncrypter(t *testing.T) {
fh, err := c.newEncrypter(z, nil)
assert.NoError(t, err)
assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32])
// Test error path
c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
fh, err = c.newEncrypter(z, nil)
assert.Nil(t, fh)
assert.Error(t, err, "short read of nonce")
assert.EqualError(t, err, "short read of nonce: EOF")
}
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
@@ -1224,7 +1237,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error())
assert.Equal(t, 1, cd.closed)
}
@@ -1232,7 +1245,7 @@ func TestNewDecrypter(t *testing.T) {
cd = newCloseDetector(er)
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
assert.Error(t, err, "potato")
assert.EqualError(t, err, "potato")
assert.Equal(t, 1, cd.closed)
// bad magic
@@ -1243,7 +1256,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0copy))
fh, err := c.newDecrypter(cd)
assert.Nil(t, fh)
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
file0copy[i] ^= 0x1
assert.Equal(t, 1, cd.closed)
}
@@ -1495,8 +1508,10 @@ func TestDecrypterRead(t *testing.T) {
case i == fileHeaderSize:
// This would normally produce an error *except* on the first block
expectedErr = nil
case i <= fileHeaderSize+blockHeaderSize:
expectedErr = ErrorEncryptedFileBadHeader
default:
expectedErr = io.ErrUnexpectedEOF
expectedErr = ErrorEncryptedBadBlock
}
if expectedErr != nil {
assert.EqualError(t, err, expectedErr.Error(), what)
@@ -1514,7 +1529,7 @@ func TestDecrypterRead(t *testing.T) {
fh, err := c.newDecrypter(cd)
assert.NoError(t, err)
_, err = io.ReadAll(fh)
assert.Error(t, err, "potato")
assert.EqualError(t, err, "potato")
assert.Equal(t, 0, cd.closed)
// Test corrupting the input
@@ -1525,15 +1540,26 @@ func TestDecrypterRead(t *testing.T) {
file16copy[i] ^= 0xFF
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
if i < fileMagicSize {
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
assert.Nil(t, fh)
} else {
assert.NoError(t, err)
_, err = io.ReadAll(fh)
assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
assert.EqualError(t, err, ErrorEncryptedBadBlock.Error())
}
file16copy[i] ^= 0xFF
}
// Test that we can corrupt a byte and read zeroes if
// passBadBlocks is set
copy(file16copy, file16)
file16copy[len(file16copy)-1] ^= 0xFF
c.passBadBlocks = true
fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
assert.NoError(t, err)
buf, err := io.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, make([]byte, 16), buf)
}
func TestDecrypterClose(t *testing.T) {
@@ -1554,7 +1580,7 @@ func TestDecrypterClose(t *testing.T) {
// double close
err = fh.Close()
assert.Error(t, err, ErrorFileClosed.Error())
assert.EqualError(t, err, ErrorFileClosed.Error())
assert.Equal(t, 1, cd.closed)
// try again reading the file this time
@@ -1581,8 +1607,6 @@ func TestPutGetBlock(t *testing.T) {
block := c.getBlock()
c.putBlock(block)
c.putBlock(block)
assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
}
func TestKey(t *testing.T) {


@@ -48,7 +48,7 @@ func init() {
Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.",
},
},
}, {
@@ -79,7 +79,9 @@ NB If filename_encryption is "off" then this option will do nothing.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
Help: `Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
@@ -119,6 +121,15 @@ names, or for debugging purposes.`,
Help: "Encrypt file data.",
},
},
}, {
Name: "pass_bad_blocks",
Help: `If set this will pass bad blocks through as all 0.
This should not be set in normal operation, it should only be set if
trying to recover an encrypted file with errors and it is desired to
recover as much of the file as possible.`,
Default: false,
Advanced: true,
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string.
@@ -138,10 +149,18 @@ length and if it's case sensitive.`,
},
{
Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
},
},
Advanced: true,
}, {
Name: "suffix",
Help: `If this is set it will override the default suffix of ".bin".
Setting suffix to "none" will result in an empty suffix. This may be useful
when the path length is critical.`,
Default: ".bin",
Advanced: true,
}},
})
}
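
The new pass_bad_blocks and suffix options are ordinary backend options, so they can be set in the config file or via the generated --crypt-pass-bad-blocks and --crypt-suffix flags. A minimal sketch of a crypt remote using both; the remote names and the wrapped remote are invented for illustration:

    [secret]
    type = crypt
    remote = mydrive:encrypted
    password = XXXXXXXX      # placeholder - set the obscured password with rclone config
    filename_encryption = off
    suffix = .jpg            # override the default ".bin" extension
    pass_bad_blocks = true   # zero-fill undecryptable blocks instead of failing

Setting suffix = none drops the extension entirely, which the new cipher tests above exercise; pass_bad_blocks should only be used when trying to salvage a damaged encrypted file.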
@@ -174,6 +193,8 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err)
}
cipher.setEncryptedSuffix(opt.Suffix)
cipher.setPassBadBlocks(opt.PassBadBlocks)
return cipher, nil
}
@@ -247,6 +268,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, err
@@ -262,7 +284,9 @@ type Options struct {
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
PassBadBlocks bool `config:"pass_bad_blocks"`
FilenameEncoding string `config:"filename_encoding"`
Suffix string `config:"suffix"`
}
// Fs represents a wrapped fs.Fs
@@ -454,7 +478,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}


@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
{Name: name, Key: "no_data_encryption", Value: "true"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})


@@ -202,7 +202,7 @@ func init() {
m.Set("root_folder_id", "appDataFolder")
}
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth {
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
OAuth2Config: driveConfig,
})
@@ -277,20 +277,23 @@ Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.
`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Sensitive: true,
}, {
Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Sensitive: true,
}, {
Name: "auth_owner_only",
Default: false,
@@ -416,10 +419,11 @@ date is used.`,
Help: "Size of listing chunk 100-1000, 0 to disable.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.`,
Advanced: true,
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.`,
Advanced: true,
Sensitive: true,
}, {
Name: "alternate_export",
Default: false,
@@ -499,7 +503,9 @@ need to use --ignore size also.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server-side operations (e.g. copy) to work across different drive configs.
Help: `Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different drive configs.
This can be useful if you wish to do a server-side copy between two
different Google drives. Note that this isn't enabled by default
@@ -588,9 +594,32 @@ This resource key requirement only applies to a subset of old files.
Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the
resource key is no needed.
resource key is not needed.
`,
Advanced: true,
Sensitive: true,
}, {
Name: "fast_list_bug_fix",
Help: `Work around a bug in Google Drive listing.
Normally rclone will work around a bug in Google Drive when using
--fast-list (ListR) where the search "(A in parents) or (B in
parents)" returns nothing sometimes. See #3114, #4289 and
https://issuetracker.google.com/issues/149522397
Rclone detects this by finding no items in more than one directory
when listing and retries them as lists of individual directories.
This means that if you have a lot of empty directories rclone will end
up listing them all individually and this can take many more API
calls.
This flag allows the work-around to be disabled. This is **not**
recommended in normal use - only if you have a particular case you are
having trouble with like many empty directories.
`,
Advanced: true,
Default: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -598,6 +627,18 @@ resource key is no needed.
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// Don't encode / as it's a valid name character in drive.
Default: encoder.EncodeInvalidUtf8,
}, {
Name: "env_auth",
Help: "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...),
})
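
The env_auth option added here lets rclone pick up Google application default credentials via google.DefaultClient when neither service_account_file nor service_account_credentials is set. A hypothetical config relying on ambient credentials, with the remote name invented for illustration:

    [gdrive]
    type = drive
    scope = drive
    env_auth = true   # use GOOGLE_APPLICATION_CREDENTIALS or instance metadata

The fast_list_bug_fix workaround documented above stays enabled by default; it can be switched off with --drive-fast-list-bug-fix=false if the extra per-directory listings become a problem.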
@@ -653,7 +694,9 @@ type Options struct {
SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"`
FastListBugFix bool `config:"fast_list_bug_fix"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
}
// Fs represents a remote drive server
@@ -1122,6 +1165,12 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
if err != nil {
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
}
} else if opt.EnvAuth {
scopes := driveScopes(opt.Scope)
oAuthClient, err = google.DefaultClient(ctx, scopes...)
if err != nil {
return nil, fmt.Errorf("failed to create client from environment: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
if err != nil {
@@ -1493,6 +1542,9 @@ func (f *Fs) newObjectWithExportInfo(
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if strings.HasSuffix(remote, "/") {
return nil, fs.ErrorIsDir
}
info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
if err != nil {
return nil, err
@@ -1862,7 +1914,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
// drive where (A in parents) or (B in parents) returns nothing
// sometimes. See #3114, #4289 and
// https://issuetracker.google.com/issues/149522397
if len(dirs) > 1 && !foundItems {
if f.opt.FastListBugFix && len(dirs) > 1 && !foundItems {
if atomic.SwapInt32(&f.grouping, 1) != 1 {
fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
}
@@ -2880,6 +2932,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
if f.rootFolderID == "appDataFolder" {
changesCall.Spaces("appDataFolder")
}
changesCall.RestrictToMyDrive(!f.opt.SharedWithMe)
changeList, err = changesCall.Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
@@ -3861,7 +3914,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info)
if err != nil {
return err
}


@@ -13,7 +13,6 @@ import (
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
@@ -140,55 +139,12 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
return complete, nil
}
// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && signalled {
if err != nil && !signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}


@@ -58,7 +58,7 @@ import (
const (
rcloneClientID = "5jcck7diasz0rqy"
rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
minSleep = 10 * time.Millisecond
defaultMinSleep = fs.Duration(10 * time.Millisecond)
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
// Upload chunk size - setting too small makes uploads slow.
@@ -182,8 +182,9 @@ client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere.
`,
Default: "",
Advanced: true,
Default: "",
Advanced: true,
Sensitive: true,
}, {
Name: "shared_files",
Help: `Instructs rclone to work on individual shared files.
@@ -260,8 +261,8 @@ uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 500ms
- batch_mode: sync - default batch_timeout is 10s
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
@@ -271,6 +272,11 @@ default based on the batch_mode in use.
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
Name: "pacer_min_sleep",
Default: defaultMinSleep,
Help: "Minimum time to sleep between API calls.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -299,6 +305,7 @@ type Options struct {
BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
}
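
pacer_min_sleep replaces the previously hard-coded 10ms minimum, so the pacer can be slowed down when Dropbox rate limits aggressively, and the batch_timeout help is corrected to 10s for async and 500ms for sync batches. A sketch with illustrative values and an invented remote name:

    [dbx]
    type = dropbox
    pacer_min_sleep = 50ms   # raise the floor between API calls
    batch_mode = async
    batch_timeout = 10s      # same as the corrected async default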
@@ -442,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name,
opt: *opt,
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
@@ -536,7 +543,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
default:
return nil, err
}
// if the moint failed we have to abort here
// if the mount failed we have to abort here
}
// if the mount succeeded it's now a normal folder in the users root namespace
// we disable shared folder mode and proceed normally
@@ -719,7 +726,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
}
for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
entries = append(entries, d)
if err != nil {
return nil, err
@@ -906,7 +913,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf)
if folderInfo != nil {
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)


@@ -28,14 +28,14 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded
}
var errorRegex = regexp.MustCompile(`#\d{1,3}`)
var errorRegex = regexp.MustCompile(`#(\d{1,3})`)
func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 {
return 0
}
code, err := strconv.Atoi(matches[0])
code, err := strconv.Atoi(matches[1])
if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0
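
Adding a capture group means matches[1] holds only the digits of the error code; matches[0] is the whole match including the leading '#', which strconv.Atoi can never parse. A standalone illustration of the difference (not rclone code, error text invented):

    package main

    import (
        "fmt"
        "regexp"
        "strconv"
    )

    func main() {
        re := regexp.MustCompile(`#(\d{1,3})`)
        m := re.FindStringSubmatch("some API error #38")
        fmt.Println(m[0])             // "#38" - Atoi would fail on this
        code, _ := strconv.Atoi(m[1]) // "38" parses cleanly
        fmt.Println(code)             // 38
    }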
@@ -118,6 +118,9 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
Single: 1,
Pass: f.opt.FilePassword,
}
if f.opt.CDN {
request.CDN = 1
}
opts := rest.Opts{
Method: "POST",
Path: "/download/get_token.cgi",
@@ -405,6 +408,32 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
return response, nil
}
func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
request := &MoveDirRequest{
FolderID: folderID,
DestinationFolderID: destinationFolderID,
Rename: newLeaf,
// DestinationUser: destinationUser,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mv.cgi",
}
response = &MoveDirResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't move dir: %w", err)
}
return response, nil
}
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{
URLs: []string{url},
@@ -473,7 +502,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("didnt got an upload node: %w", err)
return nil, fmt.Errorf("didn't get an upload node: %w", err)
}
// fs.Debugf(f, "Got Upload node")


@@ -38,8 +38,9 @@ func init() {
Description: "1Fichier",
NewFs: NewFs,
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key",
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key",
Sensitive: true,
}, {
Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder",
@@ -54,6 +55,11 @@ func init() {
Name: "folder_password",
Advanced: true,
IsPassword: true,
}, {
Help: "Set if you wish to use CDN download links.",
Name: "cdn",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -89,6 +95,7 @@ type Options struct {
SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"`
CDN bool `config:"cdn"`
Enc encoder.MultiEncoder `config:"encoding"`
}
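
The cdn option simply sets cdn=1 on the download token request above, so 1Fichier hands back CDN download links instead of the regular ones. An illustrative config with a placeholder API key:

    [fichier]
    type = fichier
    api_key = XXXXXXXXXXXXXXXX   # placeholder - from https://1fichier.com/console/params.pl
    cdn = true                   # ask for CDN download links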
@@ -333,7 +340,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) {
return nil, errors.New("File too big, cant upload")
return nil, errors.New("File too big, can't upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
@@ -481,6 +488,51 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove.
//
// If destination exists then return fs.ErrorDirExists.
//
// This is complicated by the fact that we can't use moveDir to move
// to a different directory AND rename at the same time as it can
// overwrite files in the source directory.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
if err != nil {
return err
}
srcIDnumeric, err := strconv.Atoi(srcID)
if err != nil {
return err
}
dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
if err != nil {
return err
}
var resp *MoveDirResponse
resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
if err != nil {
return fmt.Errorf("couldn't rename leaf: %w", err)
}
if resp.Status != "OK" {
return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
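
With DirMove wired up, renaming or moving a folder within the same 1Fichier remote can become a single folder/mv.cgi call instead of a copy-and-delete of every file; rclone should use the server-side move where possible, for example (paths invented):

    rclone moveto fichier:photos/2022 fichier:archive/2022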
// Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
@@ -554,6 +606,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)


@@ -20,6 +20,7 @@ type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
Pass string `json:"pass,omitempty"`
CDN int `json:"cdn,omitempty"`
}
// RemoveFolderRequest is the request structure of the corresponding request
@@ -69,6 +70,22 @@ type MoveFileResponse struct {
URLs []string `json:"urls"`
}
// MoveDirRequest is the request structure of the corresponding request
type MoveDirRequest struct {
FolderID int `json:"folder_id"`
DestinationFolderID int `json:"destination_folder_id,omitempty"`
DestinationUser string `json:"destination_user"`
Rename string `json:"rename,omitempty"`
}
// MoveDirResponse is the response structure of the corresponding request
type MoveDirResponse struct {
Status string `json:"status"`
Message string `json:"message"`
OldName string `json:"old_name"`
NewName string `json:"new_name"`
}
// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
URLs []string `json:"urls"`


@@ -84,6 +84,7 @@ Leave blank normally.
Fill in to make rclone start with directory of a given ID.
`,
Sensitive: true,
}, {
Name: "permanent_token",
Help: `Permanent Authentication Token.
@@ -97,6 +98,7 @@ These tokens are normally valid for several years.
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`,
Sensitive: true,
}, {
Name: "token",
Help: `Session Token.
@@ -106,7 +108,8 @@ usually valid for 1 hour.
Don't set this value - rclone will set it automatically.
`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "token_expiry",
Help: `Token expiry time.
@@ -155,9 +158,9 @@ type Fs struct {
tokenMu sync.Mutex // hold when reading the token
token string // current access token
tokenExpiry time.Time // time the current token expires
tokenExpired int32 // read and written with atomic
canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported
tokenExpired atomic.Int32
canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported
}
// Object describes a filefabric object
@@ -240,7 +243,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
err = status // return the error from the RPC
code := status.GetCode()
if code == "login_token_expired" {
atomic.AddInt32(&f.tokenExpired, 1)
f.tokenExpired.Add(1)
} else {
for _, retryCode := range retryStatusCodes {
if code == retryCode.code {
@@ -320,12 +323,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
var refreshed = false
defer func() {
if refreshed {
atomic.StoreInt32(&f.tokenExpired, 0)
f.tokenExpired.Store(0)
}
f.tokenMu.Unlock()
}()
expired := atomic.LoadInt32(&f.tokenExpired) != 0
expired := f.tokenExpired.Load() != 0
if expired {
fs.Debugf(f, "Token invalid - refreshing")
}
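
tokenExpired switches from a plain int32 manipulated with the atomic.AddInt32/LoadInt32/StoreInt32 functions to the atomic.Int32 type from Go 1.19+, which cannot be read or written except through its atomic methods. A minimal sketch of the pattern outside rclone:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type apiClient struct {
        tokenExpired atomic.Int32 // zero value is ready to use
    }

    func main() {
        var c apiClient
        c.tokenExpired.Add(1) // an API call noticed the token expired
        if c.tokenExpired.Load() != 0 {
            fmt.Println("token expired - refreshing")
            c.tokenExpired.Store(0) // reset once refreshed
        }
    }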


@@ -28,6 +28,7 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/proxy"
"github.com/rclone/rclone/lib/readers"
)
@@ -48,13 +49,15 @@ func init() {
Description: "FTP",
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
Sensitive: true,
}, {
Name: "user",
Help: "FTP username.",
Default: currentUser,
Name: "user",
Help: "FTP username.",
Default: currentUser,
Sensitive: true,
}, {
Name: "port",
Help: "FTP port number.",
@@ -172,6 +175,18 @@ Enabled by default. Use 0 to disable.`,
If this is set and no password is supplied then rclone will ask for a password
`,
Advanced: true,
}, {
Name: "socks_proxy",
Default: "",
Help: `Socks 5 proxy host.
Supports the format user:pass@host:port, user@host:port, host:port.
Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -216,6 +231,7 @@ type Options struct {
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
}
// Fs represents a remote FTP server
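
Because the SOCKS dialer is plugged into the same dial function rclone already hands to the ftp library, socks_proxy needs no other changes to take effect. An illustrative config using the format from the help text; host and credentials are placeholders:

    [myftp]
    type = ftp
    host = ftp.example.com
    user = alice
    # pass omitted - set the obscured password with rclone config
    socks_proxy = myUser:myPass@localhost:9005

The same value can be given on the command line as --ftp-socks-proxy.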
@@ -233,7 +249,6 @@ type Fs struct {
pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime
@@ -315,10 +330,17 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}
// Return a *textproto.Error if err contains one or nil otherwise
func textprotoError(err error) (errX *textproto.Error) {
if errors.As(err, &errX) {
return errX
}
return nil
}
// returns true if this FTP error should be retried
func isRetriableFtpError(err error) bool {
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
return true
@@ -339,10 +361,36 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
return fserrors.ShouldRetry(err), err
}
// Get a TLS config with a unique session cache.
//
// We can't share session caches between connections.
//
// See: https://github.com/rclone/rclone/issues/7234
func (f *Fs) tlsConfig() *tls.Config {
var tlsConfig *tls.Config
if f.opt.TLS || f.opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
if f.opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
}
if f.opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
return tlsConfig
}
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")
// tls.Config for this connection only. Will be used for data
// and control connections.
tlsConfig := f.tlsConfig()
// Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) {
@@ -350,12 +398,17 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
baseDialer := fshttp.NewDialer(ctx)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
} else {
conn, err = baseDialer.Dial(network, address)
}
if err != nil {
return nil, err
}
// Connect using cleartext only for non TLS
if f.tlsConf == nil {
if tlsConfig == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
@@ -364,7 +417,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, f.tlsConf)
tlsConn := tls.Client(conn, tlsConfig)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
@@ -386,9 +439,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
// as a trigger for sending PSBZ and PROT options to server.
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -471,8 +524,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
var tpErr *textproto.Error
if !errors.As(err, &tpErr) {
if tpErr := textprotoError(err); tpErr != nil {
nopErr := c.NoOp()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -544,19 +596,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
if opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
}
if opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
f := &Fs{
@@ -569,11 +608,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
pass: pass,
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
PartialUploads: true,
}).Fill(ctx, f)
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
@@ -621,8 +660,7 @@ func (f *Fs) Shutdown(ctx context.Context) error {
// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorObjectNotFound
@@ -633,8 +671,7 @@ func translateErrorFile(err error) error {
// translateErrorDir turns FTP errors into rclone errors if possible for a directory
func translateErrorDir(err error) error {
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorDirNotFound
@@ -688,6 +725,12 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
if err == fs.ErrorObjectNotFound {
return nil, nil
}
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusBadArguments:
err = nil
}
}
return nil, err
}
if entry != nil {
@@ -925,8 +968,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil
@@ -1095,7 +1137,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if !o.fs.fSetTime {
fs.Errorf(o.fs, "SetModTime is not supported")
fs.Debugf(o.fs, "SetModTime is not supported")
return nil
}
c, err := o.fs.getFtpConnection(ctx)
@@ -1167,8 +1209,7 @@ func (f *ftpReadCloser) Close() error {
// mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
err = nil
@@ -1246,13 +1287,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - send by some servers
if err != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
}
if err != nil {


@@ -91,15 +91,21 @@ func init() {
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
Sensitive: true,
}, {
Name: "user_project",
Help: "User project.\n\nOptional - needed only for requester pays.",
Sensitive: true,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
Sensitive: true,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -298,6 +304,15 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}, {
Name: "directory_markers",
Default: false,
Advanced: true,
Help: `Upload an empty object with a trailing slash when a new directory is created
Empty folders are unsupported for bucket based remotes, this option creates an empty
object ending with "/", to persist the folder.
`,
}, {
Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it.
@@ -349,6 +364,7 @@ can't check the size and hash but the file contents will be decompressed.
// Options defines the configuration for this backend
type Options struct {
ProjectNumber string `config:"project_number"`
UserProject string `config:"user_project"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
Anonymous bool `config:"anonymous"`
@@ -362,6 +378,7 @@ type Options struct {
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
DirectoryMarkers bool `config:"directory_markers"`
}
// Fs represents a remote storage server
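
user_project is threaded through the API calls above - list, get, insert, rewrite, delete and the download URL - which is what Requester Pays buckets need, while directory_markers persists empty directories as zero-length objects ending in "/". A hypothetical config combining the two; the project name is a placeholder:

    [gcs]
    type = google cloud storage
    user_project = my-billing-project   # project billed for Requester Pays access
    directory_markers = true            # keep empty directories as "dir/" marker objects

The equivalent flags are --gcs-user-project and --gcs-directory-markers.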
@@ -457,7 +474,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
@@ -543,6 +560,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(ctx, f)
if opt.DirectoryMarkers {
f.features.CanHaveEmptyDirectories = true
}
// Create a new authorized Drive client.
f.client = oAuthClient
@@ -559,7 +579,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Check to see if the object exists
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
_, err = get.Do()
return shouldRetry(ctx, err)
})
if err == nil {
@@ -619,9 +643,13 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
directory += "/"
}
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
if !recurse {
list = list.Delimiter("/")
}
foundItems := 0
for {
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
@@ -637,6 +665,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
return err
}
if !recurse {
foundItems += len(objects.Prefixes)
var object storage.Object
for _, remote := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") {
@@ -657,22 +686,29 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
}
}
foundItems += len(objects.Items)
for _, object := range objects.Items {
remote := f.opt.Enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
// is this a directory marker?
if isDirectory {
// Don't insert the root directory
if remote == directory {
continue
}
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
}
remote = remote[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
}
// is this a directory marker?
if isDirectory {
continue // skip directory marker
}
err = fn(remote, object, false)
err = fn(remote, object, isDirectory)
if err != nil {
return err
}
@@ -682,6 +718,17 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
list.PageToken(objects.NextPageToken)
}
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
// Determine whether the directory exists or not by whether it has a marker
_, err := f.readObjectInfo(ctx, bucket, directory)
if err != nil {
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
return err
}
}
return nil
}
@@ -725,6 +772,9 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
return nil, errors.New("can't list buckets without project number")
}
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
if f.opt.UserProject != "" {
listBuckets = listBuckets.UserProject(f.opt.UserProject)
}
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
@@ -842,10 +892,69 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return f.Put(ctx, in, src, options...)
}
// Create directory marker file and parents
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
if !f.opt.DirectoryMarkers || bucket == "" {
return nil
}
// Object to be uploaded
o := &Object{
fs: f,
modTime: time.Now(),
}
for {
_, bucketPath := f.split(dir)
// Don't create the directory marker if it is the bucket or at the very root
if bucketPath == "" {
break
}
o.remote = dir + "/"
// Check to see if object already exists
_, err := o.readObjectInfo(ctx)
if err == nil {
return nil
}
// Upload it if not
fs.Debugf(o, "Creating directory marker")
content := io.Reader(strings.NewReader(""))
err = o.Update(ctx, content, o)
if err != nil {
return fmt.Errorf("creating directory marker failed: %w", err)
}
// Now check parent directory exists
dir = path.Dir(dir)
if dir == "/" || dir == "." {
break
}
}
return nil
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
bucket, _ := f.split(dir)
return f.makeBucket(ctx, bucket)
e := f.checkBucket(ctx, bucket)
if e != nil {
return e
}
return f.createDirectoryMarker(ctx, bucket, dir)
}
// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
return f.Mkdir(ctx, dir)
}
// makeBucket creates the bucket if it doesn't exist
@@ -854,7 +963,11 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
_, err = list.Do()
return shouldRetry(ctx, err)
})
if err == nil {
@@ -889,7 +1002,11 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Context(ctx).Do()
insertBucket = insertBucket.Context(ctx)
if f.opt.UserProject != "" {
insertBucket = insertBucket.UserProject(f.opt.UserProject)
}
_, err = insertBucket.Do()
return shouldRetry(ctx, err)
})
}, nil)
@@ -909,12 +1026,28 @@ func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
bucket, directory := f.split(dir)
// Remove directory marker file
if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
o := &Object{
fs: f,
remote: dir + "/",
}
fs.Debugf(o, "Removing directory marker")
err := o.Remove(ctx)
if err != nil {
return fmt.Errorf("removing directory marker failed: %w", err)
}
}
if bucket == "" || directory != "" {
return nil
}
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx)
if f.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
}
err = deleteBucket.Do()
return shouldRetry(ctx, err)
})
})
@@ -936,7 +1069,7 @@ func (f *Fs) Precision() time.Duration {
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.checkBucket(ctx, dstBucket)
err := f.mkdirParent(ctx, remote)
if err != nil {
return nil, err
}
@@ -960,7 +1093,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var rewriteResponse *storage.RewriteResponse
for {
err = f.pacer.Call(func() (bool, error) {
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
rewriteRequest = rewriteRequest.Context(ctx)
if f.opt.UserProject != "" {
rewriteRequest.UserProject(f.opt.UserProject)
}
rewriteResponse, err = rewriteRequest.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1070,8 +1207,17 @@ func (o *Object) setMetaData(info *storage.Object) {
// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
return o.fs.readObjectInfo(ctx, bucket, bucketPath)
}
// readObjectInfo reads the definition for an object
func (f *Fs) readObjectInfo(ctx context.Context, bucket, bucketPath string) (object *storage.Object, err error) {
err = f.pacer.Call(func() (bool, error) {
get := f.svc.Objects.Get(bucket, bucketPath).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
object, err = get.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1143,7 +1289,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
copyObject = copyObject.Context(ctx)
if o.fs.opt.UserProject != "" {
copyObject = copyObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = copyObject.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1160,6 +1310,9 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.UserProject != "" {
o.url = o.url + "&userProject=" + o.fs.opt.UserProject
}
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
if err != nil {
return nil, err
@@ -1203,11 +1356,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split()
err := o.fs.checkBucket(ctx, bucket)
if err != nil {
return err
// Create parent dir/bucket if not saving directory marker
if !strings.HasSuffix(o.remote, "/") {
err = o.fs.mkdirParent(ctx, o.remote)
if err != nil {
return err
}
}
modTime := src.ModTime(ctx)
@@ -1252,7 +1408,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Context(ctx).Do()
insertObject = insertObject.Context(ctx)
if o.fs.opt.UserProject != "" {
insertObject = insertObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = insertObject.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1267,7 +1427,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
deleteBucket := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx)
if o.fs.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(o.fs.opt.UserProject)
}
err = deleteBucket.Do()
return shouldRetry(ctx, err)
})
return err


@@ -6,6 +6,7 @@ import (
"testing"
"github.com/rclone/rclone/backend/googlecloudstorage"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
@@ -16,3 +17,17 @@ func TestIntegration(t *testing.T) {
NilObject: (*googlecloudstorage.Object)(nil),
})
}
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestGoogleCloudStorage"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*googlecloudstorage.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}

View File

@@ -166,6 +166,7 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)

View File

@@ -23,6 +23,7 @@ func TestIntegration(t *testing.T) {
NilObject: (*hasher.Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
},
UnimplementableObjectMethods: []string{},
}

View File

@@ -21,6 +21,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
)
// Fs represents a HDFS server
@@ -31,8 +32,15 @@ type Fs struct {
opt Options // options for this backend
ci *fs.ConfigInfo // global config
client *hdfs.Client
pacer *fs.Pacer // pacer for API calls
}
const (
minSleep = 20 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) {
configPath := os.Getenv("KRB5_CONFIG")
@@ -114,6 +122,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt,
ci: fs.GetConfig(ctx),
client: client,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{

View File

@@ -19,9 +19,10 @@ func init() {
Description: "Hadoop distributed file system",
NewFs: NewFs,
Options: []fs.Option{{
Name: "namenode",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
Name: "namenode",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
Sensitive: true,
}, {
Name: "username",
Help: "Hadoop user name.",
@@ -29,6 +30,7 @@ func init() {
Value: "root",
Help: "Connect to hdfs as root.",
}},
Sensitive: true,
}, {
Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode.
@@ -36,15 +38,16 @@ func init() {
Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy.
Specifies whether or not authentication, data signature integrity
checks, and wire encryption is required when communicating the the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
checks, and wire encryption are required when communicating with
the datanodes. Possible values are 'authentication', 'integrity'
and 'privacy'. Used only with KERBEROS enabled.`,
Examples: []fs.OptionExample{{
Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.",

View File

@@ -5,10 +5,12 @@ package hdfs
import (
"context"
"errors"
"io"
"path"
"time"
"github.com/colinmarc/hdfs/v2"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
@@ -106,7 +108,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
realpath := o.fs.realpath(src.Remote())
realpath := o.fs.realpath(o.remote)
dirname := path.Dir(realpath)
fs.Debugf(o.fs, "update [%s]", realpath)
@@ -141,7 +143,23 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
err = out.Close()
// If the datanodes have acknowledged all writes but not yet
// to the namenode, FileWriter.Close can return ErrReplicating
// (wrapped in an os.PathError). This indicates that all data
// has been written, but the lease is still open for the file.
//
// It is safe in this case to either ignore the error (and let
// the lease expire on its own) or to call Close multiple
// times until it completes without an error. The Java client,
// for context, always chooses to retry, with exponential
// backoff.
err = o.fs.pacer.Call(func() (bool, error) {
err := out.Close()
if err == nil {
return false, nil
}
return errors.Is(err, hdfs.ErrReplicating), err
})
if err != nil {
cleanup()
return err
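// Sketch (not rclone code, the helper name is hypothetical): the retry idea
// the comment above describes, written as a plain bounded exponential backoff
// in place of the pacer. It uses only the errors, time and hdfs imports
// already present in this file.
func closeWithRetry(out *hdfs.FileWriter) (err error) {
    delay := 20 * time.Millisecond // matches minSleep above
    for attempt := 0; attempt < 10; attempt++ {
        err = out.Close()
        if err == nil || !errors.Is(err, hdfs.ErrReplicating) {
            return err
        }
        time.Sleep(delay)
        if delay < 10*time.Second { // capped like maxSleep above
            delay *= 2
        }
    }
    return err
}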

View File

@@ -294,15 +294,6 @@ func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType Cop
return &result, nil
}
// copyDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
}
// moveDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//

View File

@@ -2,7 +2,7 @@
package hidrive
// FIXME HiDrive only supports file or folder names of 255 characters or less.
// Operations that create files oder folder with longer names will throw a HTTP error:
// Operations that create files or folders with longer names will throw an HTTP error:
// - 422 Unprocessable Entity
// A more graceful way for rclone to handle this may be desirable.
@@ -338,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("could not access root-prefix: %w", err)
}
if item.Type != api.HiDriveObjectTypeDirectory {
return nil, errors.New("The root-prefix needs to point to a valid directory or be empty")
return nil, errors.New("the root-prefix needs to point to a valid directory or be empty")
}
}

View File

@@ -495,7 +495,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
add(file)
case fs.ErrorNotAFile:
// ...found a directory not a file
add(fs.NewDir(remote, timeUnset))
add(fs.NewDir(remote, time.Time{}))
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
@@ -507,7 +507,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
add(fs.NewDir(remote, timeUnset))
add(fs.NewDir(remote, time.Time{}))
} else {
in <- remote
}

View File

@@ -133,11 +133,13 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
},
Options: []fs.Option{{
Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
Sensitive: true,
}, {
Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
Sensitive: true,
}, {
// their official client (https://github.com/jjjake/internetarchive) hardcodes following the two
Name: "endpoint",

View File

@@ -67,13 +67,21 @@ const (
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
legacyConfigVersion = 0
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaCloudClientID = "desktop"
teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaseCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaseCloudClientID = "desktop"
telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
telianoCloudAuthURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
telianoCloudClientID = "desktop"
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
tele2CloudClientID = "desktop"
onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
onlimeCloudAuthURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
onlimeCloudClientID = "desktop"
)
// Register with Fs
@@ -84,7 +92,7 @@ func init() {
Description: "Jottacloud",
NewFs: NewFs,
Config: Config,
Options: []fs.Option{{
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -119,7 +127,7 @@ func init() {
Default: (encoder.Display |
encoder.EncodeWin | // :?"*<>|
encoder.EncodeInvalidUtf8),
}},
}}...),
})
}
@@ -134,11 +142,17 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
Value: "legacy",
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
}, {
Value: "telia",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
Value: "telia_se",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
}, {
Value: "telia_no",
Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
}, {
Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
}, {
Value: "onlime",
Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
}})
case "auth_type_done":
// Jump to next state according to config chosen
@@ -231,17 +245,32 @@ machines.`)
return nil, fmt.Errorf("error while saving token: %w", err)
}
return fs.ConfigGoto("choose_device")
case "telia": // telia cloud config
case "telia_se": // telia_se cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaCloudTokenURL)
m.Set(configClientID, teliaseCloudClientID)
m.Set(configTokenURL, teliaseCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaCloudAuthURL,
TokenURL: teliaCloudTokenURL,
AuthURL: teliaseCloudAuthURL,
TokenURL: teliaseCloudTokenURL,
},
ClientID: teliaCloudClientID,
ClientID: teliaseCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "telia_no": // telia_no cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, telianoCloudClientID)
m.Set(configTokenURL, telianoCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
},
ClientID: telianoCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
@@ -261,6 +290,21 @@ machines.`)
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "onlime": // onlime cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, onlimeCloudClientID)
m.Set(configTokenURL, onlimeCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
},
ClientID: onlimeCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
Choosing no, the default, will let you access the storage used for the archive
@@ -1838,12 +1882,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil {
// if the object exists delete it
err = o.remove(ctx, true)
if err != nil {
if err != nil && err != fs.ErrorObjectNotFound {
// if delete failed then report that, unless it was because the file did not exist after all
return fmt.Errorf("failed to remove old object: %w", err)
}
}
// if the object does not exist we can just continue but if the error is something different we should report that
if err != fs.ErrorObjectNotFound {
} else if err != fs.ErrorObjectNotFound {
// if the object does not exist we can just continue but if the error is something different we should report that
return err
}
}
@@ -1930,7 +1974,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0)
} else {
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still ned to update our metadata
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
return o.readMetaData(ctx, true)
}
@@ -1951,10 +1995,17 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
opts.Parameters.Set("dl", "true")
}
return o.fs.pacer.Call(func() (bool, error) {
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
return shouldRetry(ctx, resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
// attempting to hard delete will fail if path does not exist, but standard delete will succeed
if apiErr.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
// Remove an object
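// Sketch (hypothetical helper, not part of the change): the translation that
// remove performs inline above, so Update can treat an already-missing old
// object as success instead of a failure. Uses the api, fs and net/http
// imports already used in this file.
func translateDeleteError(err error) error {
    if apiErr, ok := err.(*api.Error); ok && apiErr.StatusCode == http.StatusNotFound {
        return fs.ErrorObjectNotFound
    }
    return err
}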

View File

@@ -61,9 +61,10 @@ func init() {
Default: true,
Advanced: true,
}, {
Name: "user",
Help: "Your user name.",
Required: true,
Name: "user",
Help: "Your user name.",
Required: true,
Sensitive: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
@@ -376,7 +377,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for i, file := range files {
remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Unix(0, 0))
entries[i] = fs.NewDir(remote, time.Time{})
} else {
entries[i] = &Object{
fs: f,

View File

@@ -13,6 +13,7 @@ import (
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"unicode/utf8"
@@ -243,7 +244,7 @@ type Fs struct {
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
xattrSupported int32 // whether xattrs are supported (atomic access)
xattrSupported atomic.Int32 // whether xattrs are supported
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
@@ -266,7 +267,10 @@ type Object struct {
// ------------------------------------------------------------
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
var (
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
)
// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -288,7 +292,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
lstat: os.Lstat,
}
if xattrSupported {
f.xattrSupported = 1
f.xattrSupported.Store(1)
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
@@ -300,6 +304,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
PartialUploads: true,
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat
@@ -310,7 +315,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err == nil {
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
// Check to see if this is a .rclonelink if not found
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
}
if err == nil && f.isRegular(fi.Mode()) {
// Handle the odd case, that a symlink was specified by name without the link suffix
if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
return nil, errLinksNeedsSuffix
}
// It is a file, so use the parent as the root
f.root = filepath.Dir(f.root)
// return an error with an fs which points to the parent
@@ -503,7 +517,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
}
err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
@@ -540,11 +554,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
mode = fi.Mode()
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
@@ -557,6 +566,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += linkSuffix
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !filter.IncludeRemote(newRemote) {
continue
}
fso, err := f.newObjectWithInfo(newRemote, fi)
if err != nil {
return nil, err
@@ -628,7 +642,13 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return os.Remove(f.localPath(dir))
localPath := f.localPath(dir)
if fi, err := os.Stat(localPath); err != nil {
return err
} else if !fi.IsDir() {
return fs.ErrorIsFile
}
return os.Remove(localPath)
}
// Precision of the file system

View File

@@ -19,6 +19,7 @@ import (
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
@@ -146,6 +147,20 @@ func TestSymlink(t *testing.T) {
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
require.Equal(t, fs.ErrorObjectNotFound, err)
// Check that NewFs works with the suffixed version and --links
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
"links": "true",
})
require.Equal(t, fs.ErrorIsFile, err)
require.Equal(t, dir, f2.(*Fs).root)
// Check that NewFs doesn't see the non suffixed version with --links
f2, err = NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"), configmap.Simple{
"links": "true",
})
require.Equal(t, errLinksNeedsSuffix, err)
require.Nil(t, f2)
// Check reading the object
in, err := o.Open(ctx)
require.NoError(t, err)
@@ -397,7 +412,7 @@ func TestFilter(t *testing.T) {
require.Equal(t, "[included]", fmt.Sprint(entries))
}
func TestFilterSymlink(t *testing.T) {
func testFilterSymlink(t *testing.T, copyLinks bool) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
@@ -411,15 +426,23 @@ func TestFilterSymlink(t *testing.T) {
require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
// Set fs into "-l" mode
// f.opt.FollowSymlinks = false
// f.opt.TranslateSymlinks = true
// f.lstat = os.Lstat
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
if copyLinks {
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
} else {
// Set fs into "-l" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
}
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
@@ -431,21 +454,35 @@ func TestFilterSymlink(t *testing.T) {
// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included.file"))
require.NoError(t, fi.AddRule("+ included.file.link"))
require.NoError(t, fi.AddRule("+ included.dir/**"))
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
if copyLinks {
require.NoError(t, fi.AddRule("+ included.file.link"))
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
} else {
require.NoError(t, fi.AddRule("+ included.file.link.rclonelink"))
require.NoError(t, fi.AddRule("+ included.dir.link.rclonelink"))
}
require.NoError(t, fi.AddRule("- *"))
// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
// Check 1 global errors one for each dangling symlink
assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
if copyLinks {
// Check 1 global errors one for each dangling symlink
assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
} else {
// Check 0 global errors as dangling symlink copied properly
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
}
accounting.Stats(ctx).ResetErrors()
sort.Sort(entries)
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[dangling.link.rclonelink included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}
// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)
@@ -456,7 +493,11 @@ func TestFilterSymlink(t *testing.T) {
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}
// Check listing through a symlink still works
entries, err = f.List(ctx, "included.dir")
@@ -466,3 +507,51 @@ func TestFilterSymlink(t *testing.T) {
sort.Sort(entries)
require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
}
func TestFilterSymlinkCopyLinks(t *testing.T) {
testFilterSymlink(t, true)
}
func TestFilterSymlinkLinks(t *testing.T) {
testFilterSymlink(t, false)
}
func TestCopySymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file and a symlink to it
r.WriteFile("src/file.txt", "hello world", when)
require.NoError(t, os.Symlink("file.txt", filepath.Join(r.LocalName, "src", "link.txt")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
// Set fs into "-l/--links" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
// Create dst
require.NoError(t, f.Mkdir(ctx, "dst"))
// Do copy from src into dst
src, err := f.NewObject(ctx, "src/link.txt.rclonelink")
require.NoError(t, err)
require.NotNil(t, src)
dst, err := operations.Copy(ctx, f, nil, "dst/link.txt.rclonelink", src)
require.NoError(t, err)
require.NotNil(t, dst)
// Test that we made a symlink and it has the right contents
dstPath := filepath.Join(r.LocalName, "dst", "link.txt")
linkContents, err := os.Readlink(dstPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkContents)
}

View File

@@ -5,6 +5,7 @@ package local
import (
"fmt"
"runtime"
"sync"
"time"
@@ -23,7 +24,7 @@ func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
// Check statx() is available as it was only introduced in kernel 4.11
// If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
var stat unix.Statx_t
if unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
if runtime.GOOS != "android" && unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
readMetadataFromFileFn = readMetadataFromFileStatx
} else {
readMetadataFromFileFn = readMetadataFromFileFstatat
@@ -91,7 +92,7 @@ func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
// The types of t.Sec and t.Nsec vary from int32 to int64 on
// different Linux architectures so we need to cast them to
// int64 here and hence need to quiet the linter about
// unecessary casts.
// unnecessary casts.
//
// nolint: unconvert
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))

View File

@@ -6,7 +6,6 @@ package local
import (
"fmt"
"strings"
"sync/atomic"
"syscall"
"github.com/pkg/xattr"
@@ -28,7 +27,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
// Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris)
if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
// Show xattrs not supported
if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
if f.xattrSupported.CompareAndSwap(1, 0) {
fs.Errorf(f, "xattrs not supported - disabling: %v", err)
}
return true
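// Side-by-side sketch (illustrative only, needs "sync/atomic") of the
// pre-Go-1.19 atomic helpers this change removes versus the atomic.Int32
// methods it now uses on Fs.xattrSupported.
func atomicInt32Sketch() {
    var legacy int32
    atomic.StoreInt32(&legacy, 1)
    legacyOK := atomic.CompareAndSwapInt32(&legacy, 1, 0)
    legacyVal := atomic.LoadInt32(&legacy)

    var modern atomic.Int32 // as in the Fs struct above
    modern.Store(1)
    modernOK := modern.CompareAndSwap(1, 0)
    modernVal := modern.Load()

    _, _, _, _ = legacyOK, legacyVal, modernOK, modernVal
}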
@@ -41,7 +40,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
// It doesn't return any attributes owned by this backend in
// metadataKeys
func (o *Object) getXattr() (metadata fs.Metadata, err error) {
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 {
return nil, nil
}
var list []string
@@ -90,7 +89,7 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
//
// It doesn't set any attributes owned by this backend in metadataKeys
func (o *Object) setXattr(metadata fs.Metadata) (err error) {
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 {
return nil
}
for k, value := range metadata {

View File

@@ -85,10 +85,11 @@ func init() {
Name: "mailru",
Description: "Mail.ru Cloud",
NewFs: NewFs,
Options: []fs.Option{{
Name: "user",
Help: "User name (usually email).",
Required: true,
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "user",
Help: "User name (usually email).",
Required: true,
Sensitive: true,
}, {
Name: "pass",
Help: `Password.
@@ -213,7 +214,7 @@ Supported quirks: atomicmkdir binlist unknowndirs`,
encoder.EncodeWin | // :?"*<>|
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}},
}}...),
})
}

View File

@@ -58,9 +58,10 @@ func init() {
Description: "Mega",
NewFs: NewFs,
Options: []fs.Option{{
Name: "user",
Help: "User name.",
Required: true,
Name: "user",
Help: "User name.",
Required: true,
Sensitive: true,
}, {
Name: "pass",
Help: "Password.",
@@ -90,7 +91,7 @@ permanently delete objects instead.`,
MEGA uses plain text HTTP connections by default.
Some ISPs throttle HTTP connections, this causes transfers to become very slow.
Enabling this will force MEGA to use HTTPS for all transfers.
HTTPS is normally not necesary since all data is already encrypted anyway.
HTTPS is normally not necessary since all data is already encrypted anyway.
Enabling it will increase CPU usage and add network overhead.`,
Default: false,
Advanced: true,
@@ -205,7 +206,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
ci := fs.GetConfig(ctx)
// cache *mega.Mega on username so we can re-use and share
// cache *mega.Mega on username so we can reuse and share
// them between remotes. They are expensive to make as they
// contain all the objects and sharing the objects makes the
// move code easier as we don't have to worry about mixing

View File

@@ -65,11 +65,13 @@ HTTP is provided primarily for debugging purposes.`,
Help: `Domain+path of NetStorage host to connect to.
Format should be ` + "`<domain>/<internal folders>`",
Required: true,
Required: true,
Sensitive: true,
}, {
Name: "account",
Help: "Set the NetStorage account name",
Required: true,
Name: "account",
Help: "Set the NetStorage account name",
Required: true,
Sensitive: true,
}, {
Name: "secret",
Help: `Set the NetStorage account secret/G2O key for authentication.
@@ -819,6 +821,8 @@ func (f *Fs) getAuth(req *http.Request) error {
// Set Authorization header
dataHeader := generateDataHeader(f)
path := req.URL.RequestURI()
//lint:ignore SA1008 false positive when running staticcheck, the header name is according to docs even if not canonical
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1008
actionHeader := req.Header["X-Akamai-ACS-Action"][0]
fs.Debugf(nil, "NetStorage API %s call %s for path %q", req.Method, actionHeader, path)
req.Header.Set("X-Akamai-ACS-Auth-Data", dataHeader)

View File

@@ -131,10 +131,11 @@ Note that the chunks will be buffered into memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "drive_id",
Help: "The ID of the drive to use.",
Default: "",
Advanced: true,
Name: "drive_id",
Help: "The ID of the drive to use.",
Default: "",
Advanced: true,
Sensitive: true,
}, {
Name: "drive_type",
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
@@ -148,7 +149,8 @@ This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get
there through a path traversal.
`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "access_scopes",
Help: `Set scopes to be requested by rclone.
@@ -196,7 +198,9 @@ listing, set this option.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server-side operations (e.g. copy) to work across different onedrive configs.
Help: `Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different onedrive configs.
This will only work if you are copying between two OneDrive *Personal* drives AND
the files to copy are already shared between them. In other cases, rclone will
@@ -258,14 +262,15 @@ this flag there.
At the time of writing this only works with OneDrive personal paid accounts.
`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "hash_type",
Default: "auto",
Help: `Specify the hash in use for the backend.
This specifies the hash type in use. If set to "auto" it will use the
default hash which is is QuickXorHash.
default hash which is QuickXorHash.
Before rclone 1.62 an SHA1 hash was used by default for Onedrive
Personal. For 1.62 and later the default is to use a QuickXorHash for
@@ -301,6 +306,24 @@ rclone.
Help: "None - don't use any hashes",
}},
Advanced: true,
}, {
Name: "av_override",
Default: false,
Help: `Allows download of files the server thinks has a virus.
The onedrive/sharepoint server may check files uploaded with an Anti
Virus checker. If it detects any potential viruses or malware it will
block download of the file.
In this case you will see a message like this
server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden:
If you are 100% sure you want to download this file anyway then use
the --onedrive-av-override flag, or av_override = true in the config
file.
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -549,15 +572,18 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
case "url":
return fs.ConfigInput("url_end", "config_site_url", `Site URL
Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
Examples:
- "mysite"
- "https://XXX.sharepoint.com/sites/mysite"
- "https://XXX.sharepoint.com/teams/ID"
`)
case "url_end":
siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint\.com/sites/(.*)`)
re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: "/sites/" + match[1],
relativePath: match[1],
})
}
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
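// Sketch (not part of the change, function name and hostname are
// illustrative): how the widened pattern above maps the documented example
// URLs to the relativePath passed to chooseDrive.
func exampleSiteURLToRelativePath() {
    re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
    for _, u := range []string{
        "https://contoso.sharepoint.com/sites/mysite", // -> "/sites/mysite"
        "https://contoso.sharepoint.com/teams/ID",     // -> "/teams/ID"
    } {
        if m := re.FindStringSubmatch(u); len(m) == 2 {
            fmt.Println(u, "->", m[1])
        }
    }
}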
@@ -640,6 +666,7 @@ type Options struct {
LinkType string `config:"link_type"`
LinkPassword string `config:"link_password"`
HashType string `config:"hash_type"`
AVOverride bool `config:"av_override"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -1724,6 +1751,10 @@ func (f *Fs) CleanUp(ctx context.Context) error {
token := make(chan struct{}, f.ci.Checkers)
var wg sync.WaitGroup
err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
if err != nil {
fs.Errorf(f, "Failed to list %q: %v", path, err)
return nil
}
err = entries.ForObjectError(func(obj fs.Object) error {
o, ok := obj.(*Object)
if !ok {
@@ -1962,12 +1993,20 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var resp *http.Response
opts := o.fs.newOptsCall(o.id, "GET", "/content")
opts.Options = options
if o.fs.opt.AVOverride {
opts.Parameters = url.Values{"AVOverride": {"1"}}
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
})
if err != nil {
if resp != nil {
if virus := resp.Header.Get("X-Virus-Infected"); virus != "" {
err = fmt.Errorf("server reports this file is infected with a virus - use --onedrive-av-override to download anyway: %s: %w", virus, err)
}
}
return nil, err
}

View File

@@ -61,7 +61,7 @@ func New() hash.Hash {
func (q *quickXorHash) Write(p []byte) (n int, err error) {
var i int
// fill last remain
lastRemain := int(q.size) % dataSize
lastRemain := q.size % dataSize
if lastRemain != 0 {
i += xorBytes(q.data[lastRemain:], p)
}

View File

@@ -42,9 +42,10 @@ func init() {
Description: "OpenDrive",
NewFs: NewFs,
Options: []fs.Option{{
Name: "username",
Help: "Username.",
Required: true,
Name: "username",
Help: "Username.",
Required: true,
Sensitive: true,
}, {
Name: "password",
Help: "Password.",
@@ -766,6 +767,17 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
if apiError, ok := err.(*Error); ok {
// Work around a bug maybe in opendrive or maybe in rclone.
//
// We should know whether the folder exists or not by the call to
// FindDir above so exactly why it is not found here is a mystery.
//
// This manifests as a failure in fs/sync TestSyncOverlapWithFilter
if apiError.Info.Message == "Folder is already deleted" {
return fs.DirEntries{}, nil
}
}
return nil, fmt.Errorf("failed to get folder list: %w", err)
}

View File

@@ -13,7 +13,6 @@ import (
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
)
const (
@@ -128,18 +127,3 @@ func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}

View File

@@ -6,6 +6,7 @@ package oracleobjectstorage
import (
"context"
"fmt"
"sort"
"strings"
"time"
@@ -196,6 +197,32 @@ func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) {
return f.listMultipartUploadsObject(ctx, bucketName, directory, false)
}
// listMultipartUploads finds first outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix, so it matches
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) findLatestMultipartUpload(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) {
pastUploads, err := f.listMultipartUploadsObject(ctx, bucketName, directory, true)
if err != nil {
return nil, err
}
if len(pastUploads) > 0 {
sort.Slice(pastUploads, func(i, j int) bool {
return pastUploads[i].TimeCreated.After(pastUploads[j].TimeCreated.Time)
})
return pastUploads[:1], nil
}
return nil, err
}
func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directory string, exact bool) (
uploads []*objectstorage.MultipartUpload, err error) {
uploads = []*objectstorage.MultipartUpload{}
req := objectstorage.ListMultipartUploadsRequest{
@@ -217,7 +244,13 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory str
if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
continue
}
uploads = append(uploads, &response.Items[index])
if exact {
if *item.Object == directory {
uploads = append(uploads, &response.Items[index])
}
} else {
uploads = append(uploads, &response.Items[index])
}
}
if response.OpcNextPage == nil {
break
@@ -226,3 +259,34 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory str
}
return uploads, nil
}
func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPath string, uploadID string) (
uploadedParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
uploadedParts = make(map[int]objectstorage.MultipartUploadPartSummary)
req := objectstorage.ListMultipartUploadPartsRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
UploadId: common.String(uploadID),
Limit: common.Int(1000),
}
var response objectstorage.ListMultipartUploadPartsResponse
for {
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.ListMultipartUploadParts(ctx, req)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
return uploadedParts, err
}
for _, item := range response.Items {
uploadedParts[*item.PartNumber] = item
}
if response.OpcNextPage == nil {
break
}
req.Page = response.OpcNextPage
}
return uploadedParts, nil
}

View File

@@ -0,0 +1,441 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"strings"
"sync"
"time"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pool"
"golang.org/x/net/http/httpguts"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
)
var warnStreamUpload sync.Once
// Info needed for an upload
type uploadInfo struct {
req *objectstorage.PutObjectRequest
md5sumHex string
}
type objectChunkWriter struct {
chunkSize int64
size int64
f *Fs
bucket *string
key *string
uploadID *string
partsToCommit []objectstorage.CommitMultipartUploadPartDetails
partsToCommitMu sync.Mutex
existingParts map[int]objectstorage.MultipartUploadPartSummary
eTag string
md5sMu sync.Mutex
md5s []byte
ui uploadInfo
o *Object
}
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
_, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
return err
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(
ctx context.Context,
remote string,
src fs.ObjectInfo,
options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
ui, err := o.prepareUpload(ctx, src, options)
if err != nil {
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
}
uploadParts := f.opt.MaxUploadParts
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
size := src.Size()
// calculate size of parts
chunkSize := f.opt.ChunkSize
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
// 48 GiB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
})
} else {
chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
}
uploadID, existingParts, err := o.createMultipartUpload(ctx, ui.req)
if err != nil {
return info, nil, fmt.Errorf("create multipart upload request failed: %w", err)
}
bucketName, bucketPath := o.split()
chunkWriter := &objectChunkWriter{
chunkSize: int64(chunkSize),
size: size,
f: f,
bucket: &bucketName,
key: &bucketPath,
uploadID: &uploadID,
existingParts: existingParts,
ui: ui,
o: o,
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(chunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
fs.Debugf(o, "open chunk writer: started multipart upload: %v", uploadID)
return info, chunkWriter, err
}
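// Back-of-the-envelope check of the streaming limit warned about above
// (illustrative constants, not referenced elsewhere): 5 MiB default chunks
// multiplied by the 10,000 part limit.
const (
    sketchDefaultChunk = 5 * 1024 * 1024                     // 5 MiB
    sketchMaxParts     = 10000                               // part limit quoted above
    sketchMaxStreamed  = sketchDefaultChunk * sketchMaxParts // 52,428,800,000 B ≈ 48.8 GiB
)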
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
if chunkNumber < 0 {
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
return -1, err
}
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(2)
}
m := md5.New()
currentChunkSize, err := io.Copy(m, reader)
if err != nil {
return -1, err
}
// If no data read, don't write the chunk
if currentChunkSize == 0 {
return 0, nil
}
md5sumBinary := m.Sum([]byte{})
w.addMd5(&md5sumBinary, int64(chunkNumber))
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
// Object storage requires 1 <= PartNumber <= 10000
ossPartNumber := chunkNumber + 1
if existing, ok := w.existingParts[ossPartNumber]; ok {
if md5sum == *existing.Md5 {
fs.Debugf(w.o, "matched uploaded part found, part num %d, skipping part, md5=%v", *existing.PartNumber, md5sum)
w.addCompletedPart(existing.PartNumber, existing.Etag)
return currentChunkSize, nil
}
}
req := objectstorage.UploadPartRequest{
NamespaceName: common.String(w.f.opt.Namespace),
BucketName: w.bucket,
ObjectName: w.key,
UploadId: w.uploadID,
UploadPartNum: common.Int(ossPartNumber),
ContentLength: common.Int64(currentChunkSize),
ContentMD5: common.String(md5sum),
}
w.o.applyPartUploadOptions(w.ui.req, &req)
var resp objectstorage.UploadPartResponse
err = w.f.pacer.Call(func() (bool, error) {
// req.UploadPartBody = io.NopCloser(bytes.NewReader(buf))
// rewind the reader on retry and after reading md5
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
req.UploadPartBody = io.NopCloser(reader)
resp, err = w.f.srv.UploadPart(ctx, req)
if err != nil {
if ossPartNumber <= 8 {
return shouldRetry(ctx, resp.HTTPResponse(), err)
}
// retry all chunks once have done the first few
return true, err
}
return false, err
})
if err != nil {
fs.Errorf(w.o, "multipart upload failed to upload part:%d err: %v", ossPartNumber, err)
return -1, fmt.Errorf("multipart upload failed to upload part: %w", err)
}
w.addCompletedPart(&ossPartNumber, resp.ETag)
return currentChunkSize, err
}
// add a part number and etag to the completed parts
func (w *objectChunkWriter) addCompletedPart(partNum *int, eTag *string) {
w.partsToCommitMu.Lock()
defer w.partsToCommitMu.Unlock()
w.partsToCommit = append(w.partsToCommit, objectstorage.CommitMultipartUploadPartDetails{
PartNum: partNum,
Etag: eTag,
})
}
func (w *objectChunkWriter) Close(ctx context.Context) (err error) {
req := objectstorage.CommitMultipartUploadRequest{
NamespaceName: common.String(w.f.opt.Namespace),
BucketName: w.bucket,
ObjectName: w.key,
UploadId: w.uploadID,
}
req.PartsToCommit = w.partsToCommit
var resp objectstorage.CommitMultipartUploadResponse
err = w.f.pacer.Call(func() (bool, error) {
resp, err = w.f.srv.CommitMultipartUpload(ctx, req)
// if multipart is corrupted, we will abort the uploadId
if isMultiPartUploadCorrupted(err) {
fs.Debugf(w.o, "multipart uploadId %v is corrupted, aborting...", *w.uploadID)
_ = w.Abort(ctx)
return false, err
}
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return err
}
w.eTag = *resp.ETag
hashOfHashes := md5.Sum(w.md5s)
wantMultipartMd5 := fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hashOfHashes[:]), len(w.partsToCommit))
gotMultipartMd5 := *resp.OpcMultipartMd5
if wantMultipartMd5 != gotMultipartMd5 {
fs.Errorf(w.o, "multipart upload corrupted: multipart md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
return fmt.Errorf("multipart upload corrupted: md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
}
fs.Debugf(w.o, "multipart upload %v md5 matched: expecting %s and got %s", *w.uploadID, wantMultipartMd5, gotMultipartMd5)
return nil
}
func isMultiPartUploadCorrupted(err error) bool {
if err == nil {
return false
}
// Check if this oci-err object, and if it is multipart commit error
if ociError, ok := err.(common.ServiceError); ok {
// If it is a timeout then we want to retry that
if ociError.GetCode() == "InvalidUploadPart" {
return true
}
}
return false
}
func (w *objectChunkWriter) Abort(ctx context.Context) error {
fs.Debugf(w.o, "Cancelling multipart upload")
err := w.o.fs.abortMultiPartUpload(
ctx,
w.bucket,
w.key,
w.uploadID)
if err != nil {
fs.Debugf(w.o, "Failed to cancel multipart upload: %v", err)
} else {
fs.Debugf(w.o, "canceled and aborted multipart upload: %v", *w.uploadID)
}
return err
}
// addMd5 adds a binary md5 to the md5 calculated so far
func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
w.md5sMu.Lock()
defer w.md5sMu.Unlock()
start := chunkNumber * md5.Size
end := start + md5.Size
if extend := end - int64(len(w.md5s)); extend > 0 {
w.md5s = append(w.md5s, make([]byte, extend)...)
}
copy(w.md5s[start:end], (*md5binary)[:])
}
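// Standalone sketch (hypothetical helper) of the composite checksum that
// Close verifies against OpcMultipartMd5 and that addMd5 accumulates:
// md5 each part, concatenate the 16-byte digests in part order, md5 the
// concatenation, base64-encode it and append "-<number of parts>".
// Uses the crypto/md5, encoding/base64 and fmt imports of this file.
func compositeMD5Sketch(parts [][]byte) string {
    var digests []byte
    for _, p := range parts {
        sum := md5.Sum(p)
        digests = append(digests, sum[:]...)
    }
    final := md5.Sum(digests)
    return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(final[:]), len(parts))
}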
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
bucket, bucketPath := o.split()
ui.req = &objectstorage.PutObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucket),
ObjectName: common.String(bucketPath),
}
// Set the mtime in the metadata
modTime := src.ModTime(ctx)
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, src, options)
if err != nil {
return ui, fmt.Errorf("failed to read metadata from source object: %w", err)
}
ui.req.OpcMeta = make(map[string]string, len(meta)+2)
// merge metadata into request and user metadata
for k, v := range meta {
pv := common.String(v)
k = strings.ToLower(k)
switch k {
case "cache-control":
ui.req.CacheControl = pv
case "content-disposition":
ui.req.ContentDisposition = pv
case "content-encoding":
ui.req.ContentEncoding = pv
case "content-language":
ui.req.ContentLanguage = pv
case "content-type":
ui.req.ContentType = pv
case "tier":
// ignore
case "mtime":
// mtime in meta overrides source ModTime
metaModTime, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
} else {
modTime = metaModTime
}
case "btime":
// write as metadata since we can't set it
ui.req.OpcMeta[k] = v
default:
ui.req.OpcMeta[k] = v
}
}
// Set the mtime in the metadata
ui.req.OpcMeta[metaMtime] = swift.TimeToFloatString(modTime)
// read the md5sum if available
// - for non-multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
size := src.Size()
isMultipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
var md5sumBase64 string
if !isMultipart || !o.fs.opt.DisableChecksum {
ui.md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(ui.md5sumHex) {
hashBytes, err := hex.DecodeString(ui.md5sumHex)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
if isMultipart && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
ui.req.OpcMeta[metaMD5Hash] = md5sumBase64
}
}
}
}
// Set the content type if it isn't set already
if ui.req.ContentType == nil {
ui.req.ContentType = common.String(fs.MimeType(ctx, src))
}
if size >= 0 {
ui.req.ContentLength = common.Int64(size)
}
if md5sumBase64 != "" {
ui.req.ContentMD5 = &md5sumBase64
}
o.applyPutOptions(ui.req, options...)
useBYOKPutObject(o.fs, ui.req)
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return ui, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
ui.req.StorageTier = storageTier
}
// Check metadata keys and values are valid
for key, value := range ui.req.OpcMeta {
if !httpguts.ValidHeaderFieldName(key) {
fs.Errorf(o, "Dropping invalid metadata key %q", key)
delete(ui.req.OpcMeta, key)
} else if value == "" {
fs.Errorf(o, "Dropping nil metadata value for key %q", key)
delete(ui.req.OpcMeta, key)
} else if !httpguts.ValidHeaderFieldValue(value) {
fs.Errorf(o, "Dropping invalid metadata value %q for key %q", value, key)
delete(ui.req.OpcMeta, key)
}
}
return ui, nil
}
func (o *Object) createMultipartUpload(ctx context.Context, putReq *objectstorage.PutObjectRequest) (
uploadID string, existingParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
bucketName, bucketPath := o.split()
f := o.fs
if f.opt.AttemptResumeUpload {
fs.Debugf(o, "attempting to resume upload for %v (if any)", o.remote)
resumeUploads, err := o.fs.findLatestMultipartUpload(ctx, bucketName, bucketPath)
if err == nil && len(resumeUploads) > 0 {
uploadID = *resumeUploads[0].UploadId
existingParts, err = f.listMultipartUploadParts(ctx, bucketName, bucketPath, uploadID)
if err == nil {
fs.Debugf(o, "resuming with existing upload id: %v", uploadID)
return uploadID, existingParts, err
}
}
}
req := objectstorage.CreateMultipartUploadRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
}
req.Object = common.String(bucketPath)
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return "", nil, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
req.StorageTier = storageTier
}
o.applyMultipartUploadOptions(putReq, &req)
var resp objectstorage.CreateMultipartUploadResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CreateMultipartUpload(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return "", existingParts, err
}
existingParts = make(map[int]objectstorage.MultipartUploadPartSummary)
uploadID = *resp.UploadId
fs.Debugf(o, "created new upload id: %v", uploadID)
return uploadID, existingParts, err
}

View File

@@ -4,12 +4,14 @@
package oracleobjectstorage
import (
"bytes"
"context"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"net/http"
"os"
"regexp"
"strconv"
"strings"
@@ -18,10 +20,8 @@ import (
"github.com/ncw/swift/v2"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
)
// ------------------------------------------------------------
@@ -367,9 +367,28 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
return resp.HTTPResponse().Body, nil
}
func isZeroLength(streamReader io.Reader) bool {
switch v := streamReader.(type) {
case *bytes.Buffer:
return v.Len() == 0
case *bytes.Reader:
return v.Len() == 0
case *strings.Reader:
return v.Len() == 0
case *os.File:
fi, err := v.Stat()
if err != nil {
return false
}
return fi.Size() == 0
default:
return false
}
}
// Update an object if it has changed
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucketName, bucketPath := o.split()
bucketName, _ := o.split()
err = o.fs.makeBucket(ctx, bucketName)
if err != nil {
return err
@@ -377,142 +396,24 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// determine if we like upload single or multipart.
size := src.Size()
multipart := size >= int64(o.fs.opt.UploadCutoff)
// Set the mtime in the metadata
modTime := src.ModTime(ctx)
metadata := map[string]string{
metaMtime: swift.TimeToFloatString(modTime),
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if isZeroLength(in) {
multipart = false
}
// read the md5sum if available
// - for non-multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
var md5sumBase64 string
var md5sumHex string
if !multipart || !o.fs.opt.DisableChecksum {
md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(md5sumHex) {
hashBytes, err := hex.DecodeString(md5sumHex)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
if multipart && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
metadata[metaMD5Hash] = md5sumBase64
}
}
}
}
// Guess the content type
mimeType := fs.MimeType(ctx, src)
if multipart {
chunkSize := int64(o.fs.opt.ChunkSize)
uploadRequest := transfer.UploadRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PartSize: common.Int64(chunkSize),
AllowMultipartUploads: common.Bool(true),
AllowParrallelUploads: common.Bool(true),
ObjectStorageClient: o.fs.srv,
EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
NumberOfGoroutines: common.Int(o.fs.opt.UploadConcurrency),
Metadata: metadataWithOpcPrefix(metadata),
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
uploadRequest.StorageTier = storageTier
}
o.applyMultiPutOptions(&uploadRequest, options...)
useBYOKUpload(o.fs, &uploadRequest)
uploadStreamRequest := transfer.UploadStreamRequest{
UploadRequest: uploadRequest,
StreamReader: in,
}
uploadMgr := transfer.NewUploadManager()
var uploadID = ""
defer atexit.OnError(&err, func() {
if uploadID == "" {
return
}
if o.fs.opt.LeavePartsOnError {
return
}
fs.Debugf(o, "Cancelling multipart upload")
errCancel := o.fs.abortMultiPartUpload(
context.Background(),
bucketName,
bucketPath,
uploadID)
if errCancel != nil {
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
}
})()
err = o.fs.pacer.Call(func() (bool, error) {
uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
var httpResponse *http.Response
if err == nil {
if uploadResponse.Type == transfer.MultipartUpload {
if uploadResponse.MultipartUploadResponse != nil {
httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
}
} else {
if uploadResponse.SinglepartUploadResponse != nil {
httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
}
}
}
if err != nil {
uploadID := ""
if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
uploadID = *uploadResponse.MultipartUploadResponse.UploadID
fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
_ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
}
}
return shouldRetry(ctx, httpResponse, err)
})
err = o.uploadMultipart(ctx, src, in, options...)
if err != nil {
fs.Errorf(o, "multipart streaming upload failed %v", err)
return err
}
} else {
req := objectstorage.PutObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PutObjectBody: io.NopCloser(in),
OpcMeta: metadata,
ui, err := o.prepareUpload(ctx, src, options)
if err != nil {
return fmt.Errorf("failed to prepare upload: %w", err)
}
if size >= 0 {
req.ContentLength = common.Int64(size)
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
req.StorageTier = storageTier
}
o.applyPutOptions(&req, options...)
useBYOKPutObject(o.fs, &req)
var resp objectstorage.PutObjectResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.PutObject(ctx, req)
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
ui.req.PutObjectBody = io.NopCloser(in)
resp, err = o.fs.srv.PutObject(ctx, *ui.req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
@@ -591,28 +492,24 @@ func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, opti
}
}
func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "content-encoding":
req.ContentEncoding = common.String(value)
case "content-language":
req.ContentLanguage = common.String(value)
case "content-type":
req.ContentType = common.String(value)
default:
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
req.Metadata[lowerKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
func (o *Object) applyMultipartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.CreateMultipartUploadRequest) {
req.ContentType = putReq.ContentType
req.ContentLanguage = putReq.ContentLanguage
req.ContentEncoding = putReq.ContentEncoding
req.ContentDisposition = putReq.ContentDisposition
req.CacheControl = putReq.CacheControl
req.Metadata = metadataWithOpcPrefix(putReq.OpcMeta)
req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm
req.OpcSseCustomerKey = putReq.OpcSseCustomerKey
req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256
req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId
}
func (o *Object) applyPartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.UploadPartRequest) {
req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm
req.OpcSseCustomerKey = putReq.OpcSseCustomerKey
req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256
req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId
}
func metadataWithOpcPrefix(src map[string]string) map[string]string {


@@ -13,9 +13,10 @@ import (
const (
maxSizeForCopy = 4768 * 1024 * 1024
minChunkSize = fs.SizeSuffix(1024 * 1024 * 5)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadParts = 10000
defaultUploadConcurrency = 10
minChunkSize = fs.SizeSuffix(5 * 1024 * 1024)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
minSleep = 10 * time.Millisecond
defaultCopyTimeoutDuration = fs.Duration(time.Minute)
@@ -55,12 +56,14 @@ type Options struct {
ConfigProfile string `config:"config_profile"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxUploadParts int `config:"max_upload_parts"`
UploadConcurrency int `config:"upload_concurrency"`
DisableChecksum bool `config:"disable_checksum"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
CopyTimeout fs.Duration `config:"copy_timeout"`
StorageTier string `config:"storage_tier"`
LeavePartsOnError bool `config:"leave_parts_on_error"`
AttemptResumeUpload bool `config:"attempt_resume_upload"`
NoCheckBucket bool `config:"no_check_bucket"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
@@ -92,14 +95,16 @@ func newOptions() []fs.Option {
Help: noAuthHelpText,
}},
}, {
Name: "namespace",
Help: "Object storage namespace",
Required: true,
Name: "namespace",
Help: "Object storage namespace",
Required: true,
Sensitive: true,
}, {
Name: "compartment",
Help: "Object storage compartment OCID",
Provider: "!no_auth",
Required: true,
Name: "compartment",
Help: "Object storage compartment OCID",
Provider: "!no_auth",
Required: true,
Sensitive: true,
}, {
Name: "region",
Help: "Object storage Region",
@@ -155,9 +160,8 @@ The minimum is 0 and the maximum is 5 GiB.`,
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff or files with unknown
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
size (e.g. from "rclone rcat" or uploaded with "rclone mount") they will be uploaded
as multipart uploads using this chunk size.
Note that "upload_concurrency" chunks of this size are buffered
in memory per transfer.
@@ -179,6 +183,20 @@ statistics displayed with "-P" flag.
`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "max_upload_parts",
Help: `Maximum number of parts in a multipart upload.
This option defines the maximum number of multipart chunks to use
when doing a multipart upload.
OCI has a maximum limit of 10,000 parts per multipart upload.
Rclone will automatically increase the chunk size when uploading a
large file of a known size to stay below this limit.
`,
Default: maxUploadParts,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
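[Editor's note: the max_upload_parts and chunk_size help above describe how rclone grows the chunk size for a large file of known size so the part count stays under OCI's 10,000-part limit. The snippet below is only an illustrative sketch of that arithmetic under the stated assumptions; chunkSizeFor is a hypothetical helper, not the backend's actual sizing code.]

package main

import "fmt"

// chunkSizeFor is a hypothetical helper: it returns a chunk size that keeps a
// file of the given size within maxParts parts, starting from minChunk and
// rounding up to a whole MiB. It only illustrates the idea in the help text.
func chunkSizeFor(size, minChunk, maxParts int64) int64 {
	const MiB = int64(1 << 20)
	needed := (size + maxParts - 1) / maxParts // ceil(size / maxParts)
	if needed <= minChunk {
		return minChunk
	}
	return ((needed + MiB - 1) / MiB) * MiB // round up to a whole MiB
}

func main() {
	const GiB = int64(1 << 30)
	// A 1 TiB upload with the default 5 MiB chunks would need ~209716 parts,
	// so the chunk size has to grow to roughly 105 MiB (~9987 parts).
	fmt.Println(chunkSizeFor(1024*GiB, 5*1024*1024, 10000)) // 110100480
}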
@@ -236,12 +254,24 @@ to start uploading.`,
encoder.EncodeDot,
}, {
Name: "leave_parts_on_error",
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.
It should be set to true for resuming uploads across different sessions.
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
additional costs if not cleaned up.
`,
Default: false,
Advanced: true,
}, {
Name: "attempt_resume_upload",
Help: `If true attempt to resume previously started multipart upload for the object.
This helps to speed up multipart transfers by resuming uploads from a past session.
WARNING: If the chunk size differs between the resumed session and the past incomplete session, then the resumed multipart
upload is aborted and a new multipart upload is started with the new chunk size.
The flag leave_parts_on_error must be true to resume, and allows parts that were already uploaded successfully to be skipped.
`,
Default: false,
Advanced: true,
@@ -289,7 +319,7 @@ Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/using
}},
}, {
Name: "sse_kms_key_id",
Help: `if using using your own master key in vault, this header specifies the
Help: `if using your own master key in vault, this header specifies the
OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,


@@ -318,7 +318,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
remote := *object.Name
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
// fs.Debugf(f, "Odd name received %v", object.Name)
continue
}
remote = remote[len(prefix):]
@@ -558,15 +557,15 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
})
}
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
if uploadID == "" {
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID *string) (err error) {
if uploadID == nil || *uploadID == "" {
return nil
}
request := objectstorage.AbortMultipartUploadRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
UploadId: common.String(uploadID),
BucketName: bucketName,
ObjectName: bucketPath,
UploadId: uploadID,
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.AbortMultipartUpload(ctx, request)
@@ -589,12 +588,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
if operations.SkipDestructive(ctx, what, "remove pending upload") {
continue
}
ignoreErr := f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
if ignoreErr != nil {
// fs.Debugf(f, "ignoring error %s", ignoreErr)
}
} else {
// fs.Debugf(f, "ignoring %s", what)
_ = f.abortMultiPartUpload(ctx, upload.Bucket, upload.Object, upload.UploadId)
}
} else {
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
@@ -689,12 +683,13 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}


@@ -110,10 +110,11 @@ func init() {
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}, {
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "d0",
Advanced: true,
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "d0",
Advanced: true,
Sensitive: true,
}, {
Name: "hostname",
Help: `Hostname to connect to.
@@ -138,7 +139,8 @@ with rclone authorize.
This is only required when you want to use the cleanup command. Due to a bug
in the pcloud API the required API does not support OAuth authentication so
we have to rely on user password authentication for it.`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "password",
Help: "Your pcloud password.",

backend/pikpak/api/types.go Normal file

@@ -0,0 +1,536 @@
// Package api has type definitions for pikpak
//
// Manually obtained from the API responses using Browse Dev. Tool and https://mholt.github.io/json-to-go/
package api
import (
"fmt"
"reflect"
"strconv"
"time"
)
const (
// "2022-09-17T14:31:06.056+08:00"
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents date and time information for the pikpak API, by using RFC3339
type Time time.Time
// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).Format(timeFormat)
return []byte(timeString), nil
}
// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
if string(data) == "null" || string(data) == `""` {
return nil
}
newT, err := time.Parse(timeFormat, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
// Types of things in Item
const (
KindOfFolder = "drive#folder"
KindOfFile = "drive#file"
KindOfFileList = "drive#fileList"
KindOfResumable = "drive#resumable"
KindOfForm = "drive#form"
ThumbnailSizeS = "SIZE_SMALL"
ThumbnailSizeM = "SIZE_MEDIUM"
ThumbnailSizeL = "SIZE_LARGE"
PhaseTypeComplete = "PHASE_TYPE_COMPLETE"
PhaseTypeRunning = "PHASE_TYPE_RUNNING"
PhaseTypeError = "PHASE_TYPE_ERROR"
PhaseTypePending = "PHASE_TYPE_PENDING"
UploadTypeForm = "UPLOAD_TYPE_FORM"
UploadTypeResumable = "UPLOAD_TYPE_RESUMABLE"
ListLimit = 100
)
// ------------------------------------------------------------
// Error details api error from pikpak
type Error struct {
Reason string `json:"error"` // short description of the reason, e.g. "file_name_empty" "invalid_request"
Code int `json:"error_code"`
URL string `json:"error_url,omitempty"`
Message string `json:"error_description,omitempty"`
// can have either of `error_details` or `details``
ErrorDetails []*ErrorDetails `json:"error_details,omitempty"`
Details []*ErrorDetails `json:"details,omitempty"`
}
// ErrorDetails contains further details of api error
type ErrorDetails struct {
Type string `json:"@type,omitempty"`
Reason string `json:"reason,omitempty"`
Domain string `json:"domain,omitempty"`
Metadata struct {
} `json:"metadata,omitempty"` // TODO: undiscovered yet
Locale string `json:"locale,omitempty"` // e.g. "en"
Message string `json:"message,omitempty"`
StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
Detail string `json:"detail,omitempty"`
}
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q (%d)", e.Reason, e.Code)
if e.Message != "" {
out += ": " + e.Message
}
return out
}
// Check Error satisfies the error interface
var _ error = (*Error)(nil)
// ------------------------------------------------------------
// Filters contains parameters for filters when listing.
//
// possible operators
// * in: a list of comma-separated string
// * eq: "true" or "false"
// * gt or lt: time format string, e.g. "2023-01-28T10:56:49.757+08:00"
type Filters struct {
Phase map[string]string `json:"phase,omitempty"` // "in" or "eq"
Trashed map[string]bool `json:"trashed,omitempty"` // "eq"
Kind map[string]string `json:"kind,omitempty"` // "eq"
Starred map[string]bool `json:"starred,omitempty"` // "eq"
ModifiedTime map[string]string `json:"modified_time,omitempty"` // "gt" or "lt"
}
// Set sets filter values using field name, operator and corresponding value
func (f *Filters) Set(field, operator, value string) {
if value == "" {
// UNSET for empty values
return
}
r := reflect.ValueOf(f)
fd := reflect.Indirect(r).FieldByName(field)
if v, err := strconv.ParseBool(value); err == nil {
fd.Set(reflect.ValueOf(map[string]bool{operator: v}))
} else {
fd.Set(reflect.ValueOf(map[string]string{operator: value}))
}
}
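[Editor's note: as a small illustration of the reflection-based Set above, the helper below is a sketch in the same package, not part of the backend. It shows that boolean values end up in a map[string]bool, everything else in a map[string]string, and empty values leave the field unset.]

// exampleFilters is illustrative only: it builds a filter for non-trashed
// files of kind "drive#file" using the Set method defined above.
func exampleFilters() *Filters {
	f := &Filters{}
	f.Set("Trashed", "eq", "false")  // stored as map[string]bool{"eq": false}
	f.Set("Kind", "eq", KindOfFile)  // stored as map[string]string{"eq": "drive#file"}
	f.Set("ModifiedTime", "gt", "")  // empty value: field left unset
	return f
}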
// ------------------------------------------------------------
// Common Elements
// Link contains a download URL for opening files
type Link struct {
URL string `json:"url"`
Token string `json:"token"`
Expire Time `json:"expire"`
Type string `json:"type,omitempty"`
}
// Valid reports whether l is non-nil, has an URL, and is not expired.
func (l *Link) Valid() bool {
return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
}
// URL is a basic form of URL
type URL struct {
Kind string `json:"kind,omitempty"` // e.g. "upload#url"
URL string `json:"url,omitempty"`
}
// ------------------------------------------------------------
// Base Elements
// FileList contains a list of File elements
type FileList struct {
Kind string `json:"kind,omitempty"` // drive#fileList
Files []*File `json:"files,omitempty"`
NextPageToken string `json:"next_page_token"`
Version string `json:"version,omitempty"`
VersionOutdated bool `json:"version_outdated,omitempty"`
}
// File is a basic element representing a single file object
//
// There are two types of download links,
// 1) one from File.WebContentLink or File.Links.ApplicationOctetStream.URL and
// 2) the other from File.Medias[].Link.URL.
// Empirically, 2) is less restrictive to multiple concurrent range-requests
// for a single file, i.e. it supports a higher `--multi-thread-streams=N`.
// However, it is not generally applicable as it is only for media.
type File struct {
Apps []*FileApp `json:"apps,omitempty"`
Audit *FileAudit `json:"audit,omitempty"`
Collection string `json:"collection,omitempty"` // TODO
CreatedTime Time `json:"created_time,omitempty"`
DeleteTime Time `json:"delete_time,omitempty"`
FileCategory string `json:"file_category,omitempty"`
FileExtension string `json:"file_extension,omitempty"`
FolderType string `json:"folder_type,omitempty"`
Hash string `json:"hash,omitempty"` // sha1 but NOT a valid file hash. looks like a torrent hash
IconLink string `json:"icon_link,omitempty"`
ID string `json:"id,omitempty"`
Kind string `json:"kind,omitempty"` // "drive#file"
Links *FileLinks `json:"links,omitempty"`
Md5Checksum string `json:"md5_checksum,omitempty"`
Medias []*Media `json:"medias,omitempty"`
MimeType string `json:"mime_type,omitempty"`
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
Name string `json:"name,omitempty"`
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
OriginalURL string `json:"original_url,omitempty"`
Params *FileParams `json:"params,omitempty"`
ParentID string `json:"parent_id,omitempty"`
Phase string `json:"phase,omitempty"`
Revision int `json:"revision,omitempty,string"`
Size int64 `json:"size,omitempty,string"`
SortName string `json:"sort_name,omitempty"`
Space string `json:"space,omitempty"`
SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
Starred bool `json:"starred,omitempty"`
ThumbnailLink string `json:"thumbnail_link,omitempty"`
Trashed bool `json:"trashed,omitempty"`
UserID string `json:"user_id,omitempty"`
UserModifiedTime Time `json:"user_modified_time,omitempty"`
WebContentLink string `json:"web_content_link,omitempty"`
Writable bool `json:"writable,omitempty"`
}
// FileLinks includes links to file at backend
type FileLinks struct {
ApplicationOctetStream *Link `json:"application/octet-stream,omitempty"`
}
// FileAudit contains audit information for the file
type FileAudit struct {
Status string `json:"status,omitempty"` // "STATUS_OK"
Message string `json:"message,omitempty"`
Title string `json:"title,omitempty"`
}
// Media contains info about supported version of media, e.g. original, transcoded, etc
type Media struct {
MediaID string `json:"media_id,omitempty"`
MediaName string `json:"media_name,omitempty"`
Video struct {
Height int `json:"height,omitempty"`
Width int `json:"width,omitempty"`
Duration int64 `json:"duration,omitempty"`
BitRate int `json:"bit_rate,omitempty"`
FrameRate int `json:"frame_rate,omitempty"`
VideoCodec string `json:"video_codec,omitempty"` // "h264", "hevc"
AudioCodec string `json:"audio_codec,omitempty"` // "pcm_bluray", "aac"
VideoType string `json:"video_type,omitempty"` // "mpegts"
HdrType string `json:"hdr_type,omitempty"`
} `json:"video,omitempty"`
Link *Link `json:"link,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
RedirectLink string `json:"redirect_link,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Priority int `json:"priority,omitempty"`
IsOrigin bool `json:"is_origin,omitempty"`
ResolutionName string `json:"resolution_name,omitempty"`
IsVisible bool `json:"is_visible,omitempty"`
Category string `json:"category,omitempty"`
}
// FileParams includes parameters for instant open
type FileParams struct {
Duration int64 `json:"duration,omitempty,string"` // in seconds
Height int `json:"height,omitempty,string"`
Platform string `json:"platform,omitempty"` // "Upload"
PlatformIcon string `json:"platform_icon,omitempty"`
URL string `json:"url,omitempty"`
Width int `json:"width,omitempty,string"`
}
// FileApp includes parameters for instant open
type FileApp struct {
ID string `json:"id,omitempty"` // "decompress" for rar files
Name string `json:"name,omitempty"` // "decompress" for rar files
Access []interface{} `json:"access,omitempty"`
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
RedirectLink string `json:"redirect_link,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Params struct {
} `json:"params,omitempty"` // TODO
CategoryIds []interface{} `json:"category_ids,omitempty"`
AdSceneType int `json:"ad_scene_type,omitempty"`
Space string `json:"space,omitempty"`
Links struct {
} `json:"links,omitempty"` // TODO
}
// ------------------------------------------------------------
// TaskList contains a list of Task elements
type TaskList struct {
Tasks []*Task `json:"tasks,omitempty"` // "drive#task"
NextPageToken string `json:"next_page_token"`
ExpiresIn int `json:"expires_in,omitempty"`
}
// Task is a basic element representing a single task such as offline download and upload
type Task struct {
Kind string `json:"kind,omitempty"` // "drive#task"
ID string `json:"id,omitempty"` // task id?
Name string `json:"name,omitempty"` // torrent name?
Type string `json:"type,omitempty"` // "offline"
UserID string `json:"user_id,omitempty"`
Statuses []interface{} `json:"statuses,omitempty"` // TODO
StatusSize int `json:"status_size,omitempty"` // TODO
Params *TaskParams `json:"params,omitempty"` // TODO
FileID string `json:"file_id,omitempty"`
FileName string `json:"file_name,omitempty"`
FileSize string `json:"file_size,omitempty"`
Message string `json:"message,omitempty"` // e.g. "Saving"
CreatedTime Time `json:"created_time,omitempty"`
UpdatedTime Time `json:"updated_time,omitempty"`
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
Progress int `json:"progress,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Callback string `json:"callback,omitempty"`
ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO
Space string `json:"space,omitempty"`
}
// TaskParams includes parameters informing status of Task
type TaskParams struct {
Age string `json:"age,omitempty"`
PredictSpeed string `json:"predict_speed,omitempty"`
PredictType string `json:"predict_type,omitempty"`
URL string `json:"url,omitempty"`
}
// Form contains parameters for upload by multipart/form-data
type Form struct {
Headers struct{} `json:"headers"`
Kind string `json:"kind"` // "drive#form"
Method string `json:"method"` // "POST"
MultiParts struct {
OSSAccessKeyID string `json:"OSSAccessKeyId"`
Signature string `json:"Signature"`
Callback string `json:"callback"`
Key string `json:"key"`
Policy string `json:"policy"`
XUserData string `json:"x:user_data"`
} `json:"multi_parts"`
URL string `json:"url"`
}
// Resumable contains parameters for upload by resumable
type Resumable struct {
Kind string `json:"kind,omitempty"` // "drive#resumable"
Provider string `json:"provider,omitempty"` // e.g. "PROVIDER_ALIYUN"
Params *ResumableParams `json:"params,omitempty"`
}
// ResumableParams specifies resumable parameters
type ResumableParams struct {
AccessKeyID string `json:"access_key_id,omitempty"`
AccessKeySecret string `json:"access_key_secret,omitempty"`
Bucket string `json:"bucket,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Expiration Time `json:"expiration,omitempty"`
Key string `json:"key,omitempty"`
SecurityToken string `json:"security_token,omitempty"`
}
// FileInArchive is a basic element in archive
type FileInArchive struct {
Index int `json:"index,omitempty"`
Filename string `json:"filename,omitempty"`
Filesize string `json:"filesize,omitempty"`
MimeType string `json:"mime_type,omitempty"`
Gcid string `json:"gcid,omitempty"`
Kind string `json:"kind,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Path string `json:"path,omitempty"`
}
// ------------------------------------------------------------
// NewFile is a response to RequestNewFile
type NewFile struct {
File *File `json:"file,omitempty"`
Form *Form `json:"form,omitempty"`
Resumable *Resumable `json:"resumable,omitempty"`
Task *Task `json:"task,omitempty"` // null in this case
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}
// NewTask is a response to RequestNewTask
type NewTask struct {
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
File *File `json:"file,omitempty"` // null in this case
Task *Task `json:"task,omitempty"`
URL *URL `json:"url,omitempty"` // {"kind": "upload#url"}
}
// About informs drive status
type About struct {
Kind string `json:"kind,omitempty"` // "drive#about"
Quota *Quota `json:"quota,omitempty"`
ExpiresAt string `json:"expires_at,omitempty"`
Quotas struct {
} `json:"quotas,omitempty"` // maybe []*Quota?
}
// Quota informs drive quota
type Quota struct {
Kind string `json:"kind,omitempty"` // "drive#quota"
Limit int64 `json:"limit,omitempty,string"` // limit in bytes
Usage int64 `json:"usage,omitempty,string"` // bytes in use
UsageInTrash int64 `json:"usage_in_trash,omitempty,string"` // bytes in trash but this seems not working
PlayTimesLimit string `json:"play_times_limit,omitempty"` // maybe in seconds
PlayTimesUsage string `json:"play_times_usage,omitempty"` // maybe in seconds
}
// Share is a response to RequestShare
//
// used in PublicLink()
type Share struct {
ShareID string `json:"share_id,omitempty"`
ShareURL string `json:"share_url,omitempty"`
PassCode string `json:"pass_code,omitempty"`
ShareText string `json:"share_text,omitempty"`
}
// User contains user account information
//
// GET https://user.mypikpak.com/v1/user/me
type User struct {
Sub string `json:"sub,omitempty"` // userid for internal use
Name string `json:"name,omitempty"` // Username
Picture string `json:"picture,omitempty"` // URL to Avatar image
Email string `json:"email,omitempty"` // redacted email address
Providers *[]UserProvider `json:"providers,omitempty"` // OAuth provider
PhoneNumber string `json:"phone_number,omitempty"`
Password string `json:"password,omitempty"` // "SET" if configured
Status string `json:"status,omitempty"` // "ACTIVE"
CreatedAt Time `json:"created_at,omitempty"`
PasswordUpdatedAt Time `json:"password_updated_at,omitempty"`
}
// UserProvider details third-party authentication
type UserProvider struct {
ID string `json:"id,omitempty"` // e.g. "google.com"
ProviderUserID string `json:"provider_user_id,omitempty"`
Name string `json:"name,omitempty"` // username
}
// VIP includes subscription details about premium account
//
// GET https://api-drive.mypikpak.com/drive/v1/privilege/vip
type VIP struct {
Result string `json:"result,omitempty"` // "ACCEPTED"
Message string `json:"message,omitempty"`
RedirectURI string `json:"redirect_uri,omitempty"`
Data struct {
Expire Time `json:"expire,omitempty"`
Status string `json:"status,omitempty"` // "invalid" or "ok"
Type string `json:"type,omitempty"` // "novip" or "platinum"
UserID string `json:"user_id,omitempty"` // same as User.Sub
} `json:"data,omitempty"`
}
// DecompressResult is a response to RequestDecompress
type DecompressResult struct {
Status string `json:"status,omitempty"` // "OK"
StatusText string `json:"status_text,omitempty"`
TaskID string `json:"task_id,omitempty"` // same as File.Id
FilesNum int `json:"files_num,omitempty"` // number of files in archive
RedirectLink string `json:"redirect_link,omitempty"`
}
// ------------------------------------------------------------
// RequestShare is to request for file share
type RequestShare struct {
FileIds []string `json:"file_ids,omitempty"`
ShareTo string `json:"share_to,omitempty"` // "publiclink",
ExpirationDays int `json:"expiration_days,omitempty"` // -1 = 'forever'
PassCodeOption string `json:"pass_code_option,omitempty"` // "NOT_REQUIRED"
}
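[Editor's note: for illustration only, using the values noted in the field comments above, a RequestShare asking for a permanent public link without a pass code might look like this hypothetical helper.]

// exampleShareRequest is an illustrative sketch, not backend code: build a
// public-link share request for a single file that never expires and needs
// no pass code.
func exampleShareRequest(fileID string) *RequestShare {
	return &RequestShare{
		FileIds:        []string{fileID},
		ShareTo:        "publiclink",
		ExpirationDays: -1, // -1 = 'forever'
		PassCodeOption: "NOT_REQUIRED",
	}
}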
// RequestBatch is to request for batch actions
type RequestBatch struct {
Ids []string `json:"ids,omitempty"`
To map[string]string `json:"to,omitempty"`
}
// RequestNewFile is to request for creating a new `drive#folder` or `drive#file`
type RequestNewFile struct {
// always required
Kind string `json:"kind"` // "drive#folder" or "drive#file"
Name string `json:"name"`
ParentID string `json:"parent_id"`
FolderType string `json:"folder_type"`
// only when uploading a new file
Hash string `json:"hash,omitempty"` // sha1sum
Resumable map[string]string `json:"resumable,omitempty"` // {"provider": "PROVIDER_ALIYUN"}
Size int64 `json:"size,omitempty"`
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}
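[Editor's note: similarly, here is an illustrative RequestNewFile for the resumable upload flow, filled in with the constants and example values from the comments above. Everything below is a made-up sketch, not the backend's code.]

// exampleNewFileRequest sketches a resumable-upload file creation request
// using the constants defined earlier in this file. Values are illustrative.
func exampleNewFileRequest(parentID, name, sha1sum string, size int64) *RequestNewFile {
	return &RequestNewFile{
		Kind:       KindOfFile,
		Name:       name,
		ParentID:   parentID,
		FolderType: "",
		Hash:       sha1sum, // sha1sum of the file contents
		Resumable:  map[string]string{"provider": "PROVIDER_ALIYUN"},
		Size:       size,
		UploadType: UploadTypeResumable,
	}
}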
// RequestNewTask is to request for creating a new task like offline downloads
//
// Name and ParentID can be left empty.
type RequestNewTask struct {
Kind string `json:"kind,omitempty"` // "drive#file"
Name string `json:"name,omitempty"`
ParentID string `json:"parent_id,omitempty"`
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
URL *URL `json:"url,omitempty"` // {"url": downloadUrl}
FolderType string `json:"folder_type,omitempty"` // "" if parent_id else "DOWNLOAD"
}
// RequestDecompress is to request for decompress of archive files
type RequestDecompress struct {
Gcid string `json:"gcid,omitempty"` // same as File.Hash
Password string `json:"password,omitempty"` // ""
FileID string `json:"file_id,omitempty"`
Files []*FileInArchive `json:"files,omitempty"` // can request selected files to be decompressed
DefaultParent bool `json:"default_parent,omitempty"`
}
// ------------------------------------------------------------
// NOT implemented YET
// RequestArchiveFileList is to request for a list of files in archive
//
// POST https://api-drive.mypikpak.com/decompress/v1/list
type RequestArchiveFileList struct {
Gcid string `json:"gcid,omitempty"` // same as api.File.Hash
Path string `json:"path,omitempty"` // "" by default
Password string `json:"password,omitempty"` // "" by default
FileID string `json:"file_id,omitempty"`
}
// ArchiveFileList is a response to RequestArchiveFileList
type ArchiveFileList struct {
Status string `json:"status,omitempty"` // "OK"
StatusText string `json:"status_text,omitempty"` // ""
TaskID string `json:"task_id,omitempty"` // ""
CurrentPath string `json:"current_path,omitempty"` // ""
Title string `json:"title,omitempty"`
FileSize int64 `json:"file_size,omitempty"`
Gcid string `json:"gcid,omitempty"` // same as File.Hash
Files []*FileInArchive `json:"files,omitempty"`
}

backend/pikpak/helper.go Normal file

@@ -0,0 +1,253 @@
package pikpak
import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"os"
"github.com/rclone/rclone/backend/pikpak/api"
"github.com/rclone/rclone/lib/rest"
)
// Globals
const (
cachePrefix = "rclone-pikpak-sha1sum-"
)
// requestDecompress requests decompress of compressed files
func (f *Fs) requestDecompress(ctx context.Context, file *api.File, password string) (info *api.DecompressResult, err error) {
req := &api.RequestDecompress{
Gcid: file.Hash,
Password: password,
FileID: file.ID,
Files: []*api.FileInArchive{},
DefaultParent: true,
}
opts := rest.Opts{
Method: "POST",
Path: "/decompress/v1/decompress",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// getUserInfo gets UserInfo from API
func (f *Fs) getUserInfo(ctx context.Context) (info *api.User, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://user.mypikpak.com/v1/user/me",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get userinfo: %w", err)
}
return
}
// getVIPInfo gets VIPInfo from API
func (f *Fs) getVIPInfo(ctx context.Context) (info *api.VIP, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://api-drive.mypikpak.com/drive/v1/privilege/vip",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get vip info: %w", err)
}
return
}
// requestBatchAction requests batch actions to API
//
// action can be one of batch{Copy,Delete,Trash,Untrash}
func (f *Fs) requestBatchAction(ctx context.Context, action string, req *api.RequestBatch) (err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files:" + action,
NoResponse: true, // Only returns `{"task_id":""}`
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, nil)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("batch action %q failed: %w", action, err)
}
return nil
}
// requestNewTask requests a new api.NewTask and returns api.Task
func (f *Fs) requestNewTask(ctx context.Context, req *api.RequestNewTask) (info *api.Task, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files",
}
var newTask api.NewTask
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &newTask)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
return newTask.Task, nil
}
// requestNewFile requests a new api.NewFile and returns api.File
func (f *Fs) requestNewFile(ctx context.Context, req *api.RequestNewFile) (info *api.NewFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// getFile gets api.File from API for the ID passed
// and returns rich information containing additional fields below
// * web_content_link
// * thumbnail_link
// * links
// * medias
func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/drive/v1/files/" + ID,
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
if err == nil && info.Phase != api.PhaseTypeComplete {
// could be pending right after file is created/uploaded.
return true, errors.New("not PHASE_TYPE_COMPLETE")
}
return f.shouldRetry(ctx, resp, err)
})
return
}
// patchFile updates attributes of the file by ID
//
// currently known patchable fields are
// * name
func (f *Fs) patchFile(ctx context.Context, ID string, req *api.File) (info *api.File, err error) {
opts := rest.Opts{
Method: "PATCH",
Path: "/drive/v1/files/" + ID,
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// getAbout gets drive#quota information from server
func (f *Fs) getAbout(ctx context.Context) (info *api.About, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/drive/v1/about",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// requestShare returns information about shareable links
func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api.Share, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/share",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// Read the sha1 of in returning a reader which will read the same contents
//
// The cleanup function should be called when out is finished with
// regardless of whether this function returned an error or not.
func readSHA1(in io.Reader, size, threshold int64) (sha1sum string, out io.Reader, cleanup func(), err error) {
// we need an SHA1
hash := sha1.New()
// use the teeReader to write to the local file AND calculate the SHA1 while doing so
teeReader := io.TeeReader(in, hash)
// nothing to clean up by default
cleanup = func() {}
// don't cache small files on disk to reduce wear of the disk
if size > threshold {
var tempFile *os.File
// create the cache file
tempFile, err = os.CreateTemp("", cachePrefix)
if err != nil {
return
}
_ = os.Remove(tempFile.Name()) // Delete the file - may not work on Windows
// clean up the file after we are done downloading
cleanup = func() {
// the file should normally already be closed, but just to make sure
_ = tempFile.Close()
_ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already
}
// copy the ENTIRE file to disc and calculate the SHA1 in the process
if _, err = io.Copy(tempFile, teeReader); err != nil {
return
}
// jump to the start of the local file so we can pass it along
if _, err = tempFile.Seek(0, 0); err != nil {
return
}
// replace the already read source with a reader of our cached file
out = tempFile
} else {
// that's a small file, just read it into memory
var inData []byte
inData, err = io.ReadAll(teeReader)
if err != nil {
return
}
// set the reader to our read memory block
out = bytes.NewReader(inData)
}
return hex.EncodeToString(hash.Sum(nil)), out, cleanup, nil
}
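[Editor's note: because the cleanup contract above is easy to miss, here is a minimal hypothetical caller. uploadWithSHA1, the 10 MiB threshold and the upload callback are all made up for illustration; only readSHA1 comes from the code above.]

// uploadWithSHA1 is a sketch of a caller (not part of the backend): upload is
// a placeholder for whatever consumes the re-readable stream and the SHA1.
func uploadWithSHA1(in io.Reader, size int64, upload func(io.Reader, string) error) error {
	const threshold = 10 * 1024 * 1024 // illustrative: cache to disk above 10 MiB
	sha1sum, out, cleanup, err := readSHA1(in, size, threshold)
	// per the contract above, cleanup must run whether or not readSHA1 failed
	defer cleanup()
	if err != nil {
		return err
	}
	return upload(out, sha1sum)
}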

backend/pikpak/pikpak.go Normal file

File diff suppressed because it is too large


@@ -0,0 +1,17 @@
// Test PikPak filesystem interface
package pikpak_test
import (
"testing"
"github.com/rclone/rclone/backend/pikpak"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestPikPak:",
NilObject: (*pikpak.Object)(nil),
})
}


@@ -82,14 +82,15 @@ func init() {
OAuth2Config: oauthConfig,
})
},
Options: []fs.Option{{
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "api_key",
Help: `API Key.
This is not normally used - use oauth instead.
`,
Hide: fs.OptionHideBoth,
Default: "",
Hide: fs.OptionHideBoth,
Default: "",
Sensitive: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -99,7 +100,7 @@ This is not normally used - use oauth instead.
encoder.EncodeBackSlash |
encoder.EncodeDoubleQuote |
encoder.EncodeInvalidUtf8),
}},
}}...),
})
}

File diff suppressed because it is too large


@@ -0,0 +1,16 @@
package protondrive_test
import (
"testing"
"github.com/rclone/rclone/backend/protondrive"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestProtonDrive:",
NilObject: (*protondrive.Object)(nil),
})
}


@@ -23,6 +23,7 @@ import (
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
)
@@ -252,9 +253,12 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return f.putUnchecked(ctx, in, src, src.Remote(), options...)
}
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options ...fs.OpenOption) (o fs.Object, err error) {
// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
size := src.Size()
remote := src.Remote()
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return nil, err
@@ -540,24 +544,59 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
if err != nil {
return nil, err
}
modTime := src.ModTime(ctx)
var resp struct {
File putio.File `json:"file"`
}
// For some unknown reason the API sometimes returns the name
// already exists unless we upload to a temporary name and
// rename
//
// {"error_id":null,"error_message":"Name already exist","error_type":"NAME_ALREADY_EXIST","error_uri":"http://api.put.io/v2/docs","extra":{},"status":"ERROR","status_code":400}
suffix := "." + random.String(8)
err = f.pacer.Call(func() (bool, error) {
params := url.Values{}
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
params.Set("parent_id", directoryID)
params.Set("name", f.opt.Enc.FromStandardName(leaf))
params.Set("name", f.opt.Enc.FromStandardName(leaf+suffix))
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode()))
if err != nil {
return false, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
// fs.Debugf(f, "copying file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
_, err = f.client.Do(req, nil)
_, err = f.client.Do(req, &resp)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
return f.NewObject(ctx, remote)
err = f.pacer.Call(func() (bool, error) {
params := url.Values{}
params.Set("file_id", strconv.FormatInt(resp.File.ID, 10))
params.Set("name", f.opt.Enc.FromStandardName(leaf))
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/rename", strings.NewReader(params.Encode()))
if err != nil {
return false, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
_, err = f.client.Do(req, &resp)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
o, err = f.newObjectWithInfo(ctx, remote, resp.File)
if err != nil {
return nil, err
}
err = o.SetModTime(ctx, modTime)
if err != nil {
return nil, err
}
return o, nil
}
// Move src to this remote using server-side move operations.
@@ -579,6 +618,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
if err != nil {
return nil, err
}
modTime := src.ModTime(ctx)
err = f.pacer.Call(func() (bool, error) {
params := url.Values{}
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
@@ -596,7 +636,15 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
if err != nil {
return nil, err
}
return f.NewObject(ctx, remote)
o, err = f.NewObject(ctx, remote)
if err != nil {
return nil, err
}
err = o.SetModTime(ctx, modTime)
if err != nil {
return nil, err
}
return o, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote


@@ -275,7 +275,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
newObj, err := o.fs.putUnchecked(ctx, in, src, o.remote, options...)
if err != nil {
return err
}


@@ -67,7 +67,7 @@ func init() {
NoOffline: true,
})
},
Options: []fs.Option{{
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
@@ -77,7 +77,7 @@ func init() {
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}},
}}...),
})
}


@@ -49,11 +49,13 @@ func init() {
Help: "Get QingStor credentials from the environment (env vars or IAM).",
}},
}, {
Name: "access_key_id",
Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
Name: "access_key_id",
Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
Sensitive: true,
}, {
Name: "secret_access_key",
Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
Name: "secret_access_key",
Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
Sensitive: true,
}, {
Name: "endpoint",
Help: "Enter an endpoint URL to connection QingStor API.\n\nLeave blank will use the default value \"https://qingstor.com:443\".",


@@ -0,0 +1,182 @@
// Package api provides types used by the Quatrix API.
package api
import (
"strconv"
"time"
)
// OverwriteOnCopyMode is a conflict resolve mode during copy. Files with conflicting names will be overwritten
const OverwriteOnCopyMode = "overwrite"
// ProfileInfo is a profile info about quota
type ProfileInfo struct {
UserUsed int64 `json:"user_used"`
UserLimit int64 `json:"user_limit"`
AccUsed int64 `json:"acc_used"`
AccLimit int64 `json:"acc_limit"`
}
// IDList is a general object that contains list of ids
type IDList struct {
IDs []string `json:"ids"`
}
// DeleteParams is the request to delete object
type DeleteParams struct {
IDs []string `json:"ids"`
DeletePermanently bool `json:"delete_permanently"`
}
// FileInfoParams is the request to get object's (file or directory) info
type FileInfoParams struct {
ParentID string `json:"parent_id,omitempty"`
Path string `json:"path"`
}
// FileInfo is the response to get object's (file or directory) info
type FileInfo struct {
FileID string `json:"file_id"`
ParentID string `json:"parent_id"`
Src string `json:"src"`
Type string `json:"type"`
}
// IsFile returns true if object is a file
// false otherwise
func (fi *FileInfo) IsFile() bool {
if fi == nil {
return false
}
return fi.Type == "F"
}
// IsDir returns true if object is a directory
// false otherwise
func (fi *FileInfo) IsDir() bool {
if fi == nil {
return false
}
return fi.Type == "D" || fi.Type == "S" || fi.Type == "T"
}
// CreateDirParams is the request to create a directory
type CreateDirParams struct {
Target string `json:"target,omitempty"`
Name string `json:"name"`
Resolve bool `json:"resolve"`
}
// File represent metadata about object in Quatrix (file or directory)
type File struct {
ID string `json:"id"`
Created JSONTime `json:"created"`
Modified JSONTime `json:"modified"`
Name string `json:"name"`
ParentID string `json:"parent_id"`
Size int64 `json:"size"`
ModifiedMS JSONTime `json:"modified_ms"`
Type string `json:"type"`
Operations int `json:"operations"`
SubType string `json:"sub_type"`
Content []File `json:"content"`
}
// IsFile returns true if object is a file
// false otherwise
func (f *File) IsFile() bool {
if f == nil {
return false
}
return f.Type == "F"
}
// IsDir returns true if object is a directory
// false otherwise
func (f *File) IsDir() bool {
if f == nil {
return false
}
return f.Type == "D" || f.Type == "S" || f.Type == "T"
}
// SetMTimeParams is the request to set modification time for object
type SetMTimeParams struct {
ID string `json:"id,omitempty"`
MTime JSONTime `json:"mtime"`
}
// JSONTime provides methods to marshal/unmarshal time.Time as Unix time
type JSONTime time.Time
// MarshalJSON returns time representation in Unix time
func (u JSONTime) MarshalJSON() ([]byte, error) {
return []byte(strconv.FormatFloat(float64(time.Time(u).UTC().UnixNano())/1e9, 'f', 6, 64)), nil
}
// UnmarshalJSON sets time from Unix time representation
func (u *JSONTime) UnmarshalJSON(data []byte) error {
f, err := strconv.ParseFloat(string(data), 64)
if err != nil {
return err
}
t := JSONTime(time.Unix(0, int64(f*1e9)))
*u = t
return nil
}
// String returns Unix time representation of time as string
func (u JSONTime) String() string {
return strconv.FormatInt(time.Time(u).UTC().Unix(), 10)
}
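[Editor's note: a short round-trip sketch of the JSONTime encoding described above. exampleJSONTime is illustrative, not backend code; it just shows that Quatrix timestamps are serialised as Unix seconds with microsecond precision.]

// exampleJSONTime marshals and unmarshals a JSONTime to show the wire format.
func exampleJSONTime() error {
	t := JSONTime(time.Unix(1700000000, 0))
	b, err := t.MarshalJSON() // []byte("1700000000.000000")
	if err != nil {
		return err
	}
	var back JSONTime
	return back.UnmarshalJSON(b) // parses the float seconds back into a JSONTime
}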
// DownloadLinkResponse is the response to download-link request
type DownloadLinkResponse struct {
ID string `json:"id"`
}
// UploadLinkParams is the request to get upload-link
type UploadLinkParams struct {
Name string `json:"name"`
ParentID string `json:"parent_id"`
Resolve bool `json:"resolve"`
}
// UploadLinkResponse is the response to upload-link request
type UploadLinkResponse struct {
Name string `json:"name"`
FileID string `json:"file_id"`
ParentID string `json:"parent_id"`
UploadKey string `json:"upload_key"`
}
// UploadFinalizeResponse is the response to finalize file method
type UploadFinalizeResponse struct {
FileID string `json:"id"`
ParentID string `json:"parent_id"`
Modified int64 `json:"modified"`
FileSize int64 `json:"size"`
}
// FileModifyParams is the request to get modify file link
type FileModifyParams struct {
ID string `json:"id"`
Truncate int64 `json:"truncate"`
}
// FileCopyMoveOneParams is the request to do server-side copy and move
// can be used for file or directory
type FileCopyMoveOneParams struct {
ID string `json:"file_id"`
Target string `json:"target_id"`
Name string `json:"name"`
MTime JSONTime `json:"mtime"`
Resolve bool `json:"resolve"`
ResolveMode string `json:"resolve_mode"`
}

backend/quatrix/quatrix.go Normal file

File diff suppressed because it is too large


@@ -0,0 +1,17 @@
// Test Quatrix filesystem interface
package quatrix_test
import (
"testing"
"github.com/rclone/rclone/backend/quatrix"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestQuatrix:",
NilObject: (*quatrix.Object)(nil),
})
}


@@ -0,0 +1,108 @@
package quatrix
import (
"sync"
"time"
"github.com/rclone/rclone/fs"
)
// UploadMemoryManager dynamically calculates every chunk size for the transfer and increases or decreases it
// depending on the upload speed. This makes general upload time smaller, because transfers that are faster
// do not have to wait for the slower ones to finish uploading.
type UploadMemoryManager struct {
m sync.Mutex
useDynamicSize bool
shared int64
reserved int64
effectiveTime time.Duration
fileUsage map[string]int64
}
// NewUploadMemoryManager is a constructor for UploadMemoryManager
func NewUploadMemoryManager(ci *fs.ConfigInfo, opt *Options) *UploadMemoryManager {
useDynamicSize := true
sharedMemory := int64(opt.MaximalSummaryChunkSize) - int64(opt.MinimalChunkSize)*int64(ci.Transfers)
if sharedMemory <= 0 {
sharedMemory = 0
useDynamicSize = false
}
return &UploadMemoryManager{
useDynamicSize: useDynamicSize,
shared: sharedMemory,
reserved: int64(opt.MinimalChunkSize),
effectiveTime: time.Duration(opt.EffectiveUploadTime),
fileUsage: map[string]int64{},
}
}
// Consume -- decide amount of memory to consume
func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed float64) int64 {
if !u.useDynamicSize {
if neededMemory < u.reserved {
return neededMemory
}
return u.reserved
}
u.m.Lock()
defer u.m.Unlock()
borrowed, found := u.fileUsage[fileID]
if found {
u.shared += borrowed
borrowed = 0
}
defer func() { u.fileUsage[fileID] = borrowed }()
effectiveChunkSize := int64(speed * u.effectiveTime.Seconds())
if effectiveChunkSize < u.reserved {
effectiveChunkSize = u.reserved
}
if neededMemory < effectiveChunkSize {
effectiveChunkSize = neededMemory
}
if effectiveChunkSize <= u.reserved {
return effectiveChunkSize
}
toBorrow := effectiveChunkSize - u.reserved
if toBorrow <= u.shared {
u.shared -= toBorrow
borrowed = toBorrow
return effectiveChunkSize
}
borrowed = u.shared
u.shared = 0
return borrowed + u.reserved
}
// Return returns consumed memory for the previous chunk upload to the memory pool
func (u *UploadMemoryManager) Return(fileID string) {
if !u.useDynamicSize {
return
}
u.m.Lock()
defer u.m.Unlock()
borrowed, found := u.fileUsage[fileID]
if !found {
return
}
u.shared += borrowed
delete(u.fileUsage, fileID)
}
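[Editor's note: to show how Consume and Return are meant to fit together, here is a hypothetical transfer loop. uploadChunks, uploadChunk and currentSpeed are placeholders invented for illustration; only the manager and its two methods come from the code above.]

// uploadChunks is an illustrative sketch (not backend code) of a transfer
// loop driving the UploadMemoryManager.
func uploadChunks(u *UploadMemoryManager, fileID string, remaining int64,
	currentSpeed func() float64, uploadChunk func(size int64) error) error {
	// hand this file's reservation back to the shared pool when we are done
	defer u.Return(fileID)
	for remaining > 0 {
		// Consume re-credits whatever this file borrowed for the previous
		// chunk, then sizes the next chunk from the current upload speed.
		size := u.Consume(fileID, remaining, currentSpeed())
		if err := uploadChunk(size); err != nil {
			return err
		}
		remaining -= size
	}
	return nil
}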

File diff suppressed because it is too large


@@ -6,15 +6,19 @@ import (
"context"
"crypto/md5"
"fmt"
"path"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/version"
"github.com/stretchr/testify/assert"
@@ -250,7 +254,8 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
time.Sleep(2 * time.Second)
// Create an object
const fileName = "test-versions.txt"
const dirName = "versions"
const fileName = dirName + "/" + "test-versions.txt"
contents := random.String(100)
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
@@ -280,11 +285,12 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
}()
// Read the contents
entries, err := f.List(ctx, "")
entries, err := f.List(ctx, dirName)
require.NoError(t, err)
tests := 0
var fileNameVersion string
for _, entry := range entries {
t.Log(entry)
remote := entry.Remote()
if remote == fileName {
t.Run("ReadCurrent", func(t *testing.T) {
@@ -309,6 +315,23 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
require.NotNil(t, o)
assert.Equal(t, int64(100), o.Size(), o.Remote())
})
// Check we can make a NewFs from that object with a version suffix
t.Run("NewFs", func(t *testing.T) {
newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
// Make sure --s3-versions is set in the config of the new remote
fs.Debugf(nil, "oldPath = %q", newPath)
lastColon := strings.LastIndex(newPath, ":")
require.True(t, lastColon >= 0)
newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
fs.Debugf(nil, "newPath = %q", newPath)
fNew, err := cache.Get(ctx, newPath)
// This should return pointing to a file
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, fNew)
// With the directory the directory above
assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
})
})
t.Run("VersionAt", func(t *testing.T) {


@@ -5,6 +5,7 @@ import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
@@ -20,6 +21,24 @@ func TestIntegration(t *testing.T) {
})
}
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
name := "TestS3"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

View File

@@ -14,6 +14,7 @@ import (
// URL parameters that need to be added to the signature
var s3ParamsToSign = map[string]struct{}{
"delete": {},
"acl": {},
"location": {},
"logging": {},

View File

@@ -17,17 +17,17 @@ func TestShouldAllowShutdownTwice(t *testing.T) {
}
func TestRenewalInTimeLimit(t *testing.T) {
var count int64
var count atomic.Int64
renew := NewRenew(100*time.Millisecond, func() error {
atomic.AddInt64(&count, 1)
count.Add(1)
return nil
})
time.Sleep(time.Second)
renew.Shutdown()
// there's no guarantee the CI agent can handle a simple goroutine
renewCount := atomic.LoadInt64(&count)
renewCount := count.Load()
t.Logf("renew count = %d", renewCount)
assert.Greater(t, renewCount, int64(0))
assert.Less(t, renewCount, int64(11))


@@ -67,15 +67,18 @@ func init() {
Value: "https://cloud.seafile.com/",
Help: "Connect to cloud.seafile.com.",
}},
Sensitive: true,
}, {
Name: configUser,
Help: "User name (usually email address).",
Required: true,
Name: configUser,
Help: "User name (usually email address).",
Required: true,
Sensitive: true,
}, {
// Password is not required, it will be left blank for 2FA
Name: configPassword,
Help: "Password.",
IsPassword: true,
Sensitive: true,
}, {
Name: config2FA,
Help: "Two-factor authentication ('true' if the account has 2FA enabled).",
@@ -87,6 +90,7 @@ func init() {
Name: configLibraryKey,
Help: "Library password (for encrypted libraries only).\n\nLeave blank if you pass it through the command line.",
IsPassword: true,
Sensitive: true,
}, {
Name: configCreateLibrary,
Help: "Should rclone create a library if it doesn't exist.",
@@ -94,9 +98,10 @@ func init() {
Default: false,
}, {
// Keep the authentication token after entering the 2FA code
Name: configAuthToken,
Help: "Authentication token.",
Hide: fs.OptionHideBoth,
Name: configAuthToken,
Help: "Authentication token.",
Hide: fs.OptionHideBoth,
Sensitive: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,

Some files were not shown because too many files have changed in this diff.