mirror of https://github.com/rclone/rclone.git synced 2026-01-03 09:03:50 +00:00

Compare commits


165 Commits

Author SHA1 Message Date
Nick Craig-Wood
9d464e8e9a Version v1.70.0 2025-06-17 17:53:11 +01:00
Nick Craig-Wood
92fea7eb1b ftp: add --ftp-http-proxy to connect via HTTP CONNECT proxy 2025-06-17 17:53:11 +01:00
Nick Craig-Wood
f226d12a2f pcloud: fix "Access denied. You do not have permissions to perform this operation" on large uploads
The API we use for OpenWriterAt seems to have been disabled at pcloud

    PUT /file_open?flags=XXX&folderid=XXX&name=XXX HTTP/1.1

gives

    {
            "result": 2003,
            "error": "Access denied. You do not have permissions to perform this operation."
    }

So disable OpenWriterAt and hence multipart uploads for the moment.
2025-06-17 12:46:35 +01:00
nielash
359260c49d operations: fix TransformFile when can't server-side copy/move 2025-06-16 17:40:19 +01:00
Nick Craig-Wood
125c8a98bb fstest: fix -verbose flag after logging revamp 2025-06-16 17:39:37 +01:00
Nick Craig-Wood
81fccd9c39 googlecloudstorage: fix directory marker after // changes in #5858
Before this change we were creating the directory markers with double
slashes on.
2025-06-16 17:33:40 +01:00
Nick Craig-Wood
1dc3421c7f s3: fix directory marker after // changes in #5858
Before this change we were creating the directory markers with double
slashes on.
2025-06-16 17:33:40 +01:00
Nick Craig-Wood
073184132e azureblob: fix directory marker after // changes in #5858
Before this change we were creating the directory markers with double
slashes on.
2025-06-16 17:33:40 +01:00
Nick Craig-Wood
476ff65fd7 tests: ignore some more habitually failing tests 2025-06-13 16:25:42 +01:00
Nick Craig-Wood
2847412433 googlephotos: fix typo in error message - Fixes #8600 2025-06-13 14:59:08 +01:00
Nick Craig-Wood
5c81132da0 s3: MEGA S4 support 2025-06-13 11:47:21 +01:00
Nick Craig-Wood
6e1c7b9239 Add Ser-Bul to contributors 2025-06-13 11:47:21 +01:00
nielash
e469c8974c chunker: fix double-transform
Before this change, chunker could double-transform a file under certain
conditions, when --name-transform was in use. This change fixes the issue by
ensuring that --name-transform is disabled during internal file moves.
2025-06-12 18:31:01 +01:00
Ser-Bul
629b427443 docs: mailru: added note about permissions level choice for the apps password 2025-06-12 17:35:42 +01:00
Nick Craig-Wood
108504963c tests: ignore habitually failing tests and backends
This ignores:

- cmd/bisync where it always fails
- cmd/gitannex where it always fails
- sharefile - citrix have refused to give us a testing account
- duplicated sia backend
- iclouddrive - token expiring every 30 days makes it too difficult

It would be nice to fix up these things at some point, but for the
integration test results to be useful they need less noise in them.
2025-06-12 16:24:14 +01:00
Nick Craig-Wood
6aa09fb1d6 docs: link to asciinema rather than including the js 2025-06-12 15:10:56 +01:00
Nick Craig-Wood
bfa6852334 docs: target="_blank" must have rel="noopener" 2025-06-12 15:10:56 +01:00
nielash
63d55d4a39 sync: fix testLoggerVsLsf when dst is local
Before this change, the testLoggerVsLsf function would get confused if given
r.Flocal when expecting r.Fremote. This change makes it agnostic.
2025-06-12 11:11:51 +01:00
kingston125
578ee49550 docs: fix FileLu docs
* Reorder providers alphabetically: moved FileLu above Files.com
* Added FileLu storage to docs.md
2025-06-11 16:25:30 +01:00
Nick Craig-Wood
dda6a863e9 build: update all dependencies
This updates all direct and indirect dependencies

It also stops the linter complaining about deprecated azidentity APIs.
2025-06-09 14:19:53 +01:00
Nick Craig-Wood
99358cee88 onedrive: fix crash if no metadata was updated
Before this change, rclone would crash if no metadata was updated.
This could happen if --onedrive-metadata-permissions was set to read
but metadata to write was supplied.

Fixes #8586
2025-06-06 17:40:25 +01:00
Nick Craig-Wood
768a4236e6 Add kingston125 to contributors 2025-06-06 17:40:25 +01:00
Nick Craig-Wood
ffbf002ba8 Add Flora Thiebaut to contributors 2025-06-06 17:40:25 +01:00
kingston125
4a1b5b864c Add FileLu cloud storage backend 2025-06-06 15:15:07 +01:00
Flora Thiebaut
3b3096c940 doi: add new doi backend
Add a new backend to support mounting datasets published with a digital
object identifier (DOI).
2025-06-05 16:40:54 +01:00
Nick Craig-Wood
51fd697c7a build: fix check_autogenerated_edits.py flagging up files that didn't exist
Before this change new backend docs would have their changes flagged
which is undesirable for the first revision.
2025-06-05 16:37:01 +01:00
Nick Craig-Wood
210acb42cd docs: rc: add more info on how to discover _config and _filter parameters #8584 2025-06-05 10:44:33 +01:00
Nick Craig-Wood
6c36615efe s3: add Exaba provider 2025-06-04 17:42:48 +01:00
nielash
d4e2717081 convmv: add convmv command
convmv supports advanced path name transformations for converting and renaming
files and directories by applying prefixes, suffixes, and other alterations.

For example:

rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,uppercase"
// Output: STORIES/THE QUICK BROWN FOX!.TXT

See help doc for complete details.
2025-06-04 17:24:07 +01:00
nielash
013c563293 lib/transform: add transform library and --name-transform flag
lib/transform adds the transform library, supporting advanced path name
transformations for converting and renaming files and directories by applying
prefixes, suffixes, and other alterations.

It also adds the --name-transform flag for use with sync, copy, and move.

Multiple transformations can be used in sequence, applied in the order they are
specified on the command line.

By default --name-transform will only apply to file names. This means only the leaf
file name will be transformed. However some of the transforms would be better
applied to the whole path or just directories. To choose which part of the
file path is affected, tags can be added to --name-transform:

- file: Only transform the leaf name of files (DEFAULT)
- dir: Only transform name of directories - these may appear anywhere in the path
- all: Transform the entire path for files and directories

Example syntax:
--name-transform file,prefix=ABC
--name-transform dir,prefix=DEF
2025-06-04 17:24:07 +01:00
nielash
41a407dcc9 march: split src and dst
splits m.key into separate functions for src and dst to prepare for
lib/transform which will want to do transforms on the src side only.

Co-Authored-By: Nick Craig-Wood <nick@craig-wood.com>
2025-06-04 17:24:07 +01:00
Nick Craig-Wood
cf1f5a7af6 Add ahxxm to contributors 2025-06-04 17:24:07 +01:00
Nick Craig-Wood
597872e5d7 Add Nathanael Demacon to contributors 2025-06-04 17:24:07 +01:00
ahxxm
e2d6872745 b2: use file id from listing when not presented in headers - fixes #8113 2025-06-04 16:23:58 +01:00
Nathanael Demacon
ddebca8d42 fs: fix goroutine leak and improve stats accounting process
This fixes the goroutine leak in the stats accounting:

- don't start stats average loop when initializing `StatsInfo`
- stop the loop instead of pausing
- use a context instead of a channel
- move `period` variable in `averageValues` struct

Fixes #8570
2025-06-04 14:43:19 +01:00
Nick Craig-Wood
5173ca0454 march: fix syncing with a duplicate file and directory
As part of the out of memory syncing code, in this commit

0148bd4668 march: Implement callback based syncing

we changed the syncing method to use a sorted stream of directory
entries.

Unfortunately as part of this change the sort order of files and
directories became undefined.

This meant that if there existed both a file `foo` and a directory
`foo` in the same directory (as is common on object storage systems)
then these could be matched up incorrectly.

They could be matched up correctly like this

- `foo` (directory) - `foo` (directory)
- `foo` (file)      - `foo` (file)

Or incorrectly like this (one of many possibilities)

- no match          - `foo` (file)
- `foo` (directory) - `foo` (directory)
- `foo` (file)      - no match

Just depending on how the input listings were ordered.

This in turn made container based syncing with a duplicated file and
directory name erratic, deleting files when it shouldn't.

This patch ensures that directories always sync before files by adding
a suffix to the sort key depending on whether the entry was a file or
directory.
2025-06-04 10:54:31 +01:00
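
The sort-key tweak described above can be illustrated with a minimal sketch (the names and suffix bytes here are illustrative, not the actual march code):

```go
package main

import (
	"fmt"
	"sort"
)

// entry stands in for a directory entry; the real code works on fs.DirEntry.
type entry struct {
	name  string
	isDir bool
}

// sortKey appends a suffix so that a directory named "foo" always sorts
// before a file named "foo", making the file/directory match-up deterministic.
func sortKey(e entry) string {
	if e.isDir {
		return e.name + "\x00" // directories first
	}
	return e.name + "\x01" // then files
}

func main() {
	entries := []entry{{"foo", false}, {"foo", true}, {"bar", false}}
	sort.Slice(entries, func(i, j int) bool {
		return sortKey(entries[i]) < sortKey(entries[j])
	})
	fmt.Println(entries) // [{bar false} {foo true} {foo false}]
}
```
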
Nick Craig-Wood
ccac9813f3 Add PrathameshLakawade to contributors 2025-06-04 10:54:31 +01:00
Nick Craig-Wood
9133fd03df Add Oleksiy Stashok to contributors 2025-06-04 10:54:31 +01:00
PrathameshLakawade
2e891f4ff8 docs: fix page_facing_up typo next to Lyve Cloud in README.md 2025-06-04 08:25:17 +02:00
PrathameshLakawade
3c66d9ccb1 backend/s3: require custom endpoint for Lyve Cloud v2 support
Lyve Cloud v2 no longer provides a shared S3 endpoint like v1 did. Instead, each customer receives
a unique, reseller-specific endpoint. To reflect this change, the S3 backend now requires users to
manually enter their endpoint when selecting Lyve Cloud as a provider.
Previously, users selected from a list of hardcoded Lyve Cloud v1 endpoints. This was not compatible
with Lyve Cloud v2 accounts and could cause confusion or misconfiguration.

This change:
- Removes outdated pre-defined endpoint selection for Lyve Cloud
- Requires users to provide their own endpoint
- Adds a format example to guide correct usage

Before: Users selected a fixed endpoint from a list (v1 only)
After:  Users must input their own endpoint (v2-compatible)
2025-06-03 16:19:41 +01:00
Oleksiy Stashok
badf16cc34 backend: skip hash calculation when the hashType is None - fixes #8518
When hashType is None, the `local` backend still runs expensive logic that reads the entire file content only to produce an empty string.
2025-06-03 15:40:50 +01:00
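
A minimal sketch of the guard this change adds, using a hypothetical fileHash helper rather than the actual local backend method:

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs/hash"
)

// fileHash is an illustrative helper. The point of the fix is to bail out
// before touching the file at all when no hash type was requested.
func fileHash(ctx context.Context, path string, ht hash.Type) (string, error) {
	if ht == hash.None {
		return "", nil // nothing requested, skip the expensive full read
	}
	// A real implementation would open the file here and stream it
	// through the requested hasher.
	return "", fmt.Errorf("hashing of %v for %q not implemented in this sketch", ht, path)
}

func main() {
	sum, err := fileHash(context.Background(), "big-file.bin", hash.None)
	fmt.Printf("hash=%q err=%v\n", sum, err) // hash="" err=<nil>
}
```
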
Nick Craig-Wood
0ee7cd80f2 azureblob: fix multipart server side copies of 0 sized files
Before this fix multipart server side copies would fail.

This problem was due to an incorrect calculation of the number of
parts to transfer - it calculated 1 part to transfer rather than 0.
2025-06-02 17:22:37 +01:00
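
The arithmetic behind this fix can be checked with a small sketch; the two formulas below match the azureblob diff further down this page, and only differ for a zero-sized source:

```go
package main

import "fmt"

func main() {
	const partSize = int64(4 * 1024 * 1024)
	for _, srcSize := range []int64{0, 1, partSize, partSize + 1} {
		oldParts := (srcSize-1)/partSize + 1            // before the fix: 1 part for size 0
		newParts := (srcSize + partSize - 1) / partSize // ceiling division: 0 parts for size 0
		fmt.Printf("size=%d old=%d new=%d\n", srcSize, oldParts, newParts)
	}
}
```
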
Nick Craig-Wood
aeb43c6a4c Add Jeremy Daer to contributors 2025-06-02 17:22:37 +01:00
Nick Craig-Wood
12322a2141 Add wbulot to contributors 2025-06-02 17:22:37 +01:00
Jeremy Daer
4fd5a3d0a2 s3: add Pure Storage FlashBlade provider support (#8575)
Pure Storage FlashBlade is an enterprise object storage platform that
provides S3-compatible APIs. This change adds FlashBlade as a new
provider option in the S3 backend.

Before this change, FlashBlade users had to use the "Other" provider
with manual configuration of various compatibility flags. This often
resulted in suboptimal performance due to conservative default settings.

After this change, users can select the "FlashBlade" S3 provider and
get an optimal configuration:

- ListObjectsV2 enabled for better performance
- AWS-compatible multipart ETags for reliable transfers
- Proper handling of "AlreadyOwnedByYou" bucket creation responses
- Path-style URLs by default (virtual-host style with DNS setup)
- Unsigned payloads to ensure compatibility with all rclone features

FlashBlade supports modern S3 features including trailer checksum
algorithms (SHA256, CRC32, CRC32C), object versioning, and lifecycle
management.

Provider settings were verified by testing against a FlashBlade//E
system running Purity//FB 4.5.7.

Documentation and test configurations are included.

Integration test results:
```
go test -v -fast-list -remote TestS3FlashBlade:
PASS
ok  	github.com/rclone/rclone/backend/s3	232.444s
```
2025-05-30 12:35:13 +01:00
wbulot
3594330177 backend/gofile: update to use new direct upload endpoint
Update the Gofile backend to use the new direct upload endpoint based on the latest API changes.
The previous implementation used dynamic server selection, but Gofile has simplified their API
to use a single upload endpoint at https://upload.gofile.io/uploadfile.

This change:
- Removes server selection logic and related code
- Simplifies the Fs struct by removing server-related fields
- Updates the upload process to use the direct upload URL
2025-05-27 14:28:25 +01:00
Nick Craig-Wood
15510c66d4 log: add --windows-event-log-level to support Windows Event Log
This provides JSON logs in the Windows Event Log.
2025-05-23 11:27:49 +01:00
Nick Craig-Wood
dfa4d94827 fs: Remove github.com/sirupsen/logrus and replace with log/slog
This removes logrus which is not developed any more and replaces it
with the new log/slog from the Go standard library.

It implements its own slog Handler which is backwards compatible with
all of rclone's previous logging modes.
2025-05-23 11:27:49 +01:00
Nick Craig-Wood
36b89960e3 Add fhuber to contributors 2025-05-23 11:27:49 +01:00
fhuber
a3f3fc61ee cmd serve s3: fix ListObjectsV2 response
Add a trailing slash to the s3 ListObjectsV2 response because some clients expect a trailing forward slash to distinguish whether the returned object is a directory.

Fixes #8464
2025-05-22 22:27:38 +01:00
Nick Craig-Wood
b8fde4fc46 Changelog updates from Version v1.69.3 2025-05-22 09:55:00 +01:00
Nick Craig-Wood
c37fe733df onedrive: re-add --onedrive-upload-cutoff flag
This was removed as part of #1716 to fix rclone uploads taking double
the space.

7f744033d8 onedrive: Removed upload cutoff and always do session uploads

As far as I can see, two revisions are still being created for single
part uploads so the default for this flag is set to -1, off.

However it may be useful for experimentation.

See: #8545
2025-05-15 15:25:10 +01:00
Nick Craig-Wood
b31659904f onedrive: fix "The upload session was not found" errors
Before this change, perhaps on heavily loaded SharePoint servers,
uploads would sometimes fail with the error:

{"error":{"code":"itemNotFound","message":"The upload session was not found"}}

This retries the upload after a 5 second delay up to --low-level-retries times.

Fixes #8545
2025-05-15 15:25:10 +01:00
Nick Craig-Wood
ebcf51336e Add Germán Casares to contributors 2025-05-15 15:25:10 +01:00
Nick Craig-Wood
a334bba643 Add Jeff Geerling to contributors 2025-05-15 15:25:10 +01:00
Germán Casares
d4fd93e7f3 googlephotos: update read only and read write scopes to meet Google's requirements.
As part of changes to the Google Photos APIs the scopes rclone used
for accessing Google photos have been removed.

This commit replaces the scopes with updated ones.

These aren't as powerful as the old scopes - this means rclone will
only be able to download photos it uploaded from March 31, 2025.

To use these new scopes do `rclone reconnect yourgooglephotosremote:`

Fixes #8434

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2025-05-12 16:43:23 +01:00
albertony
6644bdba0f build: update github.com/ebitengine/purego to v0.8.3 to fix mac_amd64 build
Fixes #8552
2025-05-12 09:08:15 +02:00
albertony
68a65e878f docs: add hint about config touch and config file not found 2025-05-09 08:30:34 +01:00
Jeff Geerling
7606ad8294 docs: add FAQ for dismissing 'rclone.conf not found'
See: https://forum.rclone.org/t/notice-about-missing-rclone-conf-is-annoying/51116
2025-05-09 08:23:31 +02:00
Nick Craig-Wood
32847e88b4 docs: document how to keep an out of tree backend 2025-05-08 17:16:28 +01:00
Nick Craig-Wood
2e879586bd Add Clément Wehrung to contributors 2025-05-08 17:16:28 +01:00
Clément Wehrung
9d55b2411f iclouddrive: fix panic and files potentially downloaded twice
- Fix SIGSEGV - Fixes #8211
- Avoid files potentially being downloaded twice
2025-05-07 18:00:33 +01:00
Nick Craig-Wood
fe880c0fac docs: move --max-connections documentation to the correct place 2025-05-06 15:23:55 +01:00
Nick Craig-Wood
b160089be7 Add Ben Boeckel to contributors 2025-05-06 15:23:55 +01:00
Nick Craig-Wood
c2254164f8 Add Tho Neyugn to contributors 2025-05-06 15:23:55 +01:00
Ben Boeckel
e57b94c4ac docs: fix typo in s3/storj docs 2025-05-04 18:57:47 +02:00
Tho Neyugn
3273bf3716 serve s3: remove redundant handler initialization 2025-05-01 16:49:11 +01:00
Nick Craig-Wood
f5501edfcf Changelog updates from Version 1.69.2 2025-05-01 16:43:16 +01:00
Nick Craig-Wood
2404831725 sftp: add --sftp-http-proxy to connect via HTTP CONNECT proxy 2025-04-29 14:16:17 +01:00
Nick Craig-Wood
9f0e237931 Add Jugal Kishore to contributors 2025-04-29 14:16:09 +01:00
Jugal Kishore
f752eaa298 docs: correct SSL docs anchor link from #ssl-tls to #tls-ssl
Fixed the anchor link in the documentation that points to the SSL/TLS section.
This change ensures the link directs correctly to the intended section (#tls-ssl) instead of the incorrect #ssl-tls.

No functional code changes, documentation only.
2025-04-28 10:19:35 +02:00
Nick Craig-Wood
1f8373fae8 drive: metadata: fix error when setting copy-requires-writer-permission on a folder
This appears not to be allowed, so this fixes the problem by ignoring
that metadata for a folder.

Fixes #8517
2025-04-25 12:15:37 +01:00
Nick Craig-Wood
b94f80b9d7 docs: Update contributors
- Add Andrew Kreimer to contributors
- Add Christian Richter to contributors
- Add Ed Craig-Wood to contributors
- Add Klaas Freitag to contributors
- Add Ralf Haferkamp to contributors
2025-04-25 12:14:37 +01:00
dependabot[bot]
5f4e983ccb build: bump golang.org/x/net from 0.36.0 to 0.38.0
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.36.0 to 0.38.0.
- [Commits](https://github.com/golang/net/compare/v0.36.0...v0.38.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-version: 0.38.0
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-22 13:49:35 +02:00
Ed Craig-Wood
28b6f38135 Update README.md
removed warp as project sponsor
2025-04-16 10:42:00 +01:00
Andrew Kreimer
6adb4056bb docs: fix typos via codespell
There are some typos in the changelog.

Fix them via codespell.
2025-04-16 09:24:01 +02:00
Klaas Freitag
0b9671313b webdav: add an ownCloud Infinite Scale vendor that enables tus chunked upload support
This change adds a new vendor called "infinitescale" to the webdav
backend. It enables the ownCloud Infinite Scale
https://github.com/owncloud/ocis project and implements its specific
chunked uploader following the tus protocol https://tus.io

Signed-off-by: Christian Richter <crichter@owncloud.com>
Co-authored-by: Klaas Freitag <klaas.freitag@kiteworks.com>
Co-authored-by: Christian Richter <crichter@owncloud.com>
Co-authored-by: Christian Richter <1058116+dragonchaser@users.noreply.github.com>
Co-authored-by: Ralf Haferkamp <r.haferkamp@opencloud.eu>
2025-04-11 12:23:55 +01:00
Nick Craig-Wood
e0c99d6203 onedrive: fix metadata ordering in permissions
Before this change, due to a quirk in Graph, User permissions could be
lost when applying permissions.

Fixes #8465
2025-04-11 10:38:51 +01:00
Nick Craig-Wood
7af1a930b7 Add Ben Alex to contributors 2025-04-11 10:38:51 +01:00
Nick Craig-Wood
6e46ee4ffa Add simwai to contributors 2025-04-11 10:38:51 +01:00
Ben Alex
4f1fc1a84e iclouddrive: fix so created files are writable
At present any created file (eg through the touch command, copy, mount
etc) is read-only in iCloud.

This has been reported by users at
https://forum.rclone.org/t/icloud-and-file-editing-permissions/50659.
2025-04-10 11:38:38 +01:00
simwai
c10b6c5e8e cmd/authorize: show required arguments in help text 2025-04-09 16:30:38 +01:00
yuval-cloudinary
52ff407116 cloudinary: var naming convention - #8416 2025-04-09 15:03:59 +01:00
yuval-cloudinary
078d202f39 cloudinary: automatically add/remove known media files extensions #8416 2025-04-09 15:03:59 +01:00
Nick Craig-Wood
3e105f7e58 Add Markus Gerstel to contributors 2025-04-09 15:03:59 +01:00
Nick Craig-Wood
02ca72e30c Add Enduriel to contributors 2025-04-09 15:03:59 +01:00
Nick Craig-Wood
e567c52457 Add huanghaojun to contributors 2025-04-09 15:03:59 +01:00
Nick Craig-Wood
10501d0398 Add simonmcnair to contributors 2025-04-09 15:03:59 +01:00
Nick Craig-Wood
972ed42661 Add Samantha Bowen to contributors 2025-04-09 15:03:59 +01:00
Markus Gerstel
48802b0a3b s3: documentation regression - fixes #8438
We lost a previous documentation fix (#7077) detailing how to restore
single objects from AWS S3 Glacier.

Also make clearer that rclone provides restore functionality natively.

Co-authored-by: danielkrajnik <dan94kra@gmail.com>
2025-04-09 14:18:18 +01:00
Enduriel
a9c7c493cf hash: add SHA512 support for file hashes 2025-04-09 14:16:22 +01:00
huanghaojun
49f6ed5f5e vfs: fix inefficient directory caching when directory reads are slow
Before this change, when querying directories with large datasets, if
the query duration exceeded the directory cache expiration time, the
cache became invalid by the time results were retrieved. This meant
every execution of `_readDir` triggered `_readDirFromEntries`,
resulting in prolonged processing times.

After this change we update the directory time with the time at the
end of the query.
2025-04-09 11:58:09 +01:00
simonmcnair
a5d03e0ada docs: update fuse version in docker docs 2025-04-09 11:54:06 +01:00
Samantha Bowen
199f61cefa fs/config: Read configuration passwords from stdin even when terminated with EOF - fixes #8480 2025-04-09 11:41:10 +01:00
Dan McArdle
fa78c6443e cmd/gitannex: Reject unknown layout modes in INITREMOTE
This is a "fail fast" improvement. Now, we will reject invalid layout
modes at setup time, rather than deferring failure until the user
attempts a transfer.
2025-04-09 11:27:44 +01:00
Dan McArdle
52e2e4b84c cmd/gitannex: Add configparse.go and refactor
This is a behavior-preserving refactor. I'm mostly just moving the code
that defines and parses configs (e.g. "rcloneremotename") into a new
source file. This lets us focus more on implementing the text protocol
in gitannex.go.
2025-04-09 11:27:44 +01:00
Dan McArdle
1c933372fe cmd/gitannex: Permit remotes with options
It looks like commit 2a1e28f5f5 did not
fix the errors in the integration tests that I hoped it would. Upon
further inspection, I noticed that I forgot that remotes can have
options just like backends.

This should fix some of the failing integration tests. For context:
https://github.com/rclone/rclone/pull/7987#issuecomment-2688580667

Specifically, I believe that TestGitAnnexFstestBackendCases/HandlesInit
should no longer fail on the Azure backend with "INITREMOTE-FAILURE
remote does not exist: TestAzureBlob,directory_markers:".

Issue #7984
2025-04-09 11:27:44 +01:00
Nick Craig-Wood
f5dfe3f5a6 serve ftp: add serve rc interface 2025-04-09 11:12:07 +01:00
Nick Craig-Wood
5702b7578c serve sftp: add serve rc interface 2025-04-09 11:12:07 +01:00
Nick Craig-Wood
703788b40e serve restic: add serve rc interface 2025-04-09 11:12:07 +01:00
Nick Craig-Wood
aef9c2117e serve s3: add serve rc interface 2025-04-09 11:12:07 +01:00
Nick Craig-Wood
2a42d95385 serve dlna: add serve rc interface 2025-04-09 11:12:07 +01:00
Nick Craig-Wood
e37775bb41 serve webdav: add serve rc interface - fixes #4505 2025-04-09 11:12:07 +01:00
Nick Craig-Wood
780f4040ea serve http: add serve rc interface 2025-04-09 11:12:07 +01:00
Nick Craig-Wood
0b7be6ffb9 serve nfs: add serve rc interface 2025-04-09 11:12:07 +01:00
Nick Craig-Wood
4d9a165e56 serve: Add rc control for serve commands #4505
This adds the framework for serving. The individual servers will be
added in separate commits.
2025-04-09 11:12:07 +01:00
Nick Craig-Wood
21e5fa192a configstruct: add SetAny to parse config from the rc
Now that we have unified the config, we can make a much more
convenient rc interface which mirrors the command line exactly, rather
than using the structure of the internal Go structs.
2025-04-09 11:12:07 +01:00
Nick Craig-Wood
cf571ad661 rc: In options/info make FieldName contain a "." if it should be nested
Before this change the output would have "FieldName": "ListenAddr" where it
actually needs to be set in a sub object "HTTP".

After this fix it outputs "FieldName": "HTTP.ListenAddr" to indicate
"ListenAddr" needs to be set in the object "HTTP".
2025-04-09 11:12:07 +01:00
Nick Craig-Wood
b1456835d8 serve restic: convert options to new style 2025-04-09 11:12:07 +01:00
Nick Craig-Wood
b930c4b437 serve s3: convert options to new style 2025-04-09 11:12:07 +01:00
Nick Craig-Wood
cebd588092 serve http: convert options to new style 2025-04-09 11:12:07 +01:00
Nick Craig-Wood
3c981e6c2c serve webdav: convert options to new style 2025-04-09 11:12:07 +01:00
Nick Craig-Wood
6054c4e49d auth proxy: convert options to new style 2025-04-09 11:12:07 +01:00
Nick Craig-Wood
028316ba5d auth proxy: add VFS options parameter for use for default VFS
This is for use from the RC API.
2025-04-09 11:12:07 +01:00
Nick Craig-Wood
df457f5802 serve: make the servers self registering
This is so that they can import cmd/serve without causing an import
loop.

The active servers can now be configured by commenting lines out in
cmd/all/all.go like all the other commands.
2025-04-09 11:12:07 +01:00
Nick Craig-Wood
084e35c49d lib/http: fix race between Serve() and Shutdown()
This was discovered by the race detector.
2025-04-09 11:12:07 +01:00
Nick Craig-Wood
90ea4a73ad lib/http: add Addr() method to return the first configured server address 2025-04-09 11:12:07 +01:00
Nick Craig-Wood
efe8ac8f35 Add Danny Garside to contributors 2025-04-09 11:12:06 +01:00
Danny Garside
894ef3b375 docs: fix minor typo in box docs 2025-04-08 20:51:22 +01:00
Nick Craig-Wood
385465bfa9 sync: implement --list-cutoff to allow on disk sorting for reduced memory use
Before this change, rclone had to load an entire directory into RAM in
order to sort it so it could be synced.

With directories with millions of entries, this used too much memory.

This fixes the problem by using an on disk sort when there are more
than --list-cutoff entries in a directory.

Fixes #7974
2025-04-08 18:02:24 +01:00
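
A toy sketch of the idea, assuming a made-up sorter type rather than the real fs/list implementation: entries are kept in memory until the cutoff is exceeded, at which point the real code would spill them to disk.

```go
package main

import (
	"fmt"
	"sort"
)

// sorter illustrates the --list-cutoff behaviour only; the real
// implementation spills entries to temporary files on disk.
type sorter struct {
	cutoff int
	names  []string
	onDisk bool
}

func (s *sorter) Add(name string) {
	s.names = append(s.names, name)
	if !s.onDisk && len(s.names) > s.cutoff {
		s.onDisk = true
		fmt.Println("cutoff exceeded: switching to on-disk sort")
	}
}

func (s *sorter) Send() []string {
	sort.Strings(s.names)
	return s.names
}

func main() {
	s := &sorter{cutoff: 3}
	for _, n := range []string{"c", "a", "d", "b", "e"} {
		s.Add(n)
	}
	fmt.Println(s.Send()) // [a b c d e]
}
```
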
Nick Craig-Wood
0148bd4668 march: Implement callback based syncing
This changes the syncing method to take callbacks for directory
listings rather than being passed the entire directory listing at
once.

This will enable out of memory syncing.
2025-04-08 18:02:24 +01:00
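
The shape of callback-based listing can be sketched as follows; plain strings are used instead of fs.DirEntries to keep the example self-contained:

```go
package main

import "fmt"

// entriesCallback mirrors the shape of rclone's fs.ListRCallback,
// which receives one tranche of entries at a time.
type entriesCallback func(entries []string) error

// listInTranches hands the listing to the callback a tranche at a time,
// so the caller never needs the whole directory in memory at once.
func listInTranches(all []string, trancheSize int, cb entriesCallback) error {
	for len(all) > 0 {
		n := trancheSize
		if n > len(all) {
			n = len(all)
		}
		if err := cb(all[:n]); err != nil {
			return err
		}
		all = all[n:]
	}
	return nil
}

func main() {
	entries := []string{"a", "b", "c", "d", "e"}
	_ = listInTranches(entries, 2, func(tranche []string) error {
		fmt.Println("got tranche:", tranche)
		return nil
	})
}
```
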
Nick Craig-Wood
0f7ecf6f06 list: add ListDirSortedFn for callback oriented directory listing
This will be used for the out of memory sync
2025-04-08 15:14:09 +01:00
Nick Craig-Wood
08e81f8420 list: Implement Sorter to sort directory entries
Later this will be extended to do out of memory sorts
2025-04-08 15:14:09 +01:00
Nick Craig-Wood
0ac2d2f50f cache: mark ListP as not supported yet 2025-04-08 15:14:09 +01:00
Nick Craig-Wood
42fcb0a6fc hasher: implement ListP interface 2025-04-08 15:14:09 +01:00
Nick Craig-Wood
490dd14bc5 compress: implement ListP interface 2025-04-08 15:14:09 +01:00
Nick Craig-Wood
943ea0acae chunker: mark ListP as not supported yet 2025-04-08 15:14:09 +01:00
Nick Craig-Wood
d64a97f973 union: mark ListP as not supported yet 2025-04-08 15:14:09 +01:00
Nick Craig-Wood
5d8f1d4b88 crypt: implement ListP interface 2025-04-08 15:14:09 +01:00
Nick Craig-Wood
b1d774c2e3 combine: implement ListP interface 2025-04-08 15:14:09 +01:00
Nick Craig-Wood
fad579c4a2 s3: Implement paged listing interface ListP 2025-04-08 15:14:09 +01:00
Nick Craig-Wood
37120ef7bd list: add WithListP helper to implement List for ListP backends 2025-04-08 15:14:09 +01:00
Nick Craig-Wood
cba653d502 walk: move NewListRHelper into list.Helper to avoid circular dependency
It turns out that the list helpers were at the wrong level and needed
to be pushed down into the fs/list package for future work.
2025-04-08 15:14:00 +01:00
Nick Craig-Wood
2a90de9502 fs: define ListP interface for paged listing #4788 2025-04-08 15:12:53 +01:00
Nick Craig-Wood
bff229713a accounting: Add listed stat for number of directory entries listed 2025-04-08 15:12:53 +01:00
Nick Craig-Wood
117f583ebe walk: factor Listing helpers into their own file and add tests 2025-04-08 15:12:53 +01:00
Nick Craig-Wood
205667143c serve nfs: make metadata files have special file handles
Metadata files have the file handle of their source file with
0x00000001 suffixed in big endian so we can look them up directly from
their file handles.
2025-04-07 13:41:29 +01:00
Nick Craig-Wood
fe84cbdc9d serve nfs: change the format of --nfs-cache-type symlink file handles
This is a backwards incompatible change which will invalidate the
current handles.

This change adds a 4 byte big endian length prefix to the handles so
we can in future suffix extra info on the handles. This needed to be 4
bytes as Linux does not like file handles which aren't multiples of 4
bytes long.
2025-04-07 13:41:29 +01:00
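
A rough sketch of the handle framing described in the two commits above, using hypothetical helper names rather than the actual serve nfs code:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// wrapHandle prefixes a raw handle with its length as 4 bytes big endian.
// A 4-byte prefix keeps the handle a multiple of 4 bytes long (assuming the
// raw handle already is), which Linux requires.
func wrapHandle(raw []byte) []byte {
	out := binary.BigEndian.AppendUint32(nil, uint32(len(raw)))
	return append(out, raw...)
}

// metadataHandle derives the handle of a metadata sidecar file by
// suffixing the source file's handle with 0x00000001 big endian.
func metadataHandle(source []byte) []byte {
	return binary.BigEndian.AppendUint32(append([]byte(nil), source...), 1)
}

func main() {
	raw := []byte{0xde, 0xad, 0xbe, 0xef}
	h := wrapHandle(raw)
	fmt.Printf("wrapped:  % x\n", h)
	fmt.Printf("metadata: % x\n", metadataHandle(h))
}
```
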
Nick Craig-Wood
533c6438f3 vfs: add --vfs-metadata-extension to expose metadata sidecar files
This adds --vfs-metadata-extension which can be used to expose sidecar
files containing file metadata. These files don't exist in the listings
until they are accessed.
2025-04-07 13:41:29 +01:00
Nick Craig-Wood
b587b094c9 docs: Add rcloneui.com as Silver Sponsor 2025-04-07 13:41:29 +01:00
Nick Craig-Wood
525798e1a5 Add Klaas Freitag to contributors 2025-04-07 13:41:29 +01:00
Nick Craig-Wood
ea63052d36 Add eccoisle to contributors 2025-04-07 13:41:29 +01:00
Nick Craig-Wood
b5a99c5011 Add Fernando Fernández to contributors 2025-04-07 13:41:29 +01:00
Nick Craig-Wood
56b7015675 Add alingse to contributors 2025-04-07 13:41:29 +01:00
Nick Craig-Wood
4ff970ebab Add Jörn Friedrich Dreyer to contributors 2025-04-07 13:41:29 +01:00
eccoisle
dccb5144c3 docs: replace option --auto-filename-header with --header-filename 2025-04-06 14:28:34 +02:00
dependabot[bot]
33b087171a build: update github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 to fix CVE-2025-30204
Bumps [github.com/golang-jwt/jwt/v5](https://github.com/golang-jwt/jwt) from 5.2.1 to 5.2.2.
- [Release notes](https://github.com/golang-jwt/jwt/releases)
- [Changelog](https://github.com/golang-jwt/jwt/blob/main/VERSION_HISTORY.md)
- [Commits](https://github.com/golang-jwt/jwt/compare/v5.2.1...v5.2.2)

See: https://github.com/golang-jwt/jwt/security/advisories/GHSA-mh63-6h87-95cp
See: https://www.cve.org/CVERecord?id=CVE-2025-30204

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-06 11:46:13 +01:00
Fernando Fernández
58d9ae1c60 docs/googlephotos: fix typos 2025-04-06 10:49:02 +02:00
dependabot[bot]
20302ab6b9 build: bump github.com/golang-jwt/jwt/v4 from 4.5.1 to 4.5.2
Bumps [github.com/golang-jwt/jwt/v4](https://github.com/golang-jwt/jwt) from 4.5.1 to 4.5.2.
- [Release notes](https://github.com/golang-jwt/jwt/releases)
- [Changelog](https://github.com/golang-jwt/jwt/blob/main/VERSION_HISTORY.md)
- [Commits](https://github.com/golang-jwt/jwt/compare/v4.5.1...v4.5.2)

---
updated-dependencies:
- dependency-name: github.com/golang-jwt/jwt/v4
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-04 17:26:44 +02:00
alingse
6fb0de62a4 operations: fix call fmt.Errorf with wrong err 2025-04-04 16:21:45 +02:00
Jörn Friedrich Dreyer
839eef0db2 webdav: retry propfind on 425 status
This retries propfind on 425 status.

In ownCloud Infinite Scale, files might be in that state if
postprocessing is still ongoing. All metadata is available anyway.

Allow item status 425 "too early" for items when changing metadata.

This fixes the upload behavior with ownCloud Infinite Scale.

Signed-off-by: Jörn Friedrich Dreyer <jfd@butonic.de>
Co-authored-by: Klaas Freitag <kraft@freisturz.de>
2025-03-26 12:51:04 +00:00
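
The retry condition can be sketched like this; the status list is illustrative, not the webdav backend's exact retry table:

```go
package main

import (
	"fmt"
	"net/http"
	"slices"
)

// retryStatusCodes includes 425 "Too Early", which ownCloud Infinite
// Scale returns while postprocessing of an uploaded file is still ongoing.
var retryStatusCodes = []int{
	http.StatusTooEarly, // 425
	429, 500, 502, 503, 504,
}

func shouldRetry(resp *http.Response) bool {
	return resp != nil && slices.Contains(retryStatusCodes, resp.StatusCode)
}

func main() {
	fmt.Println(shouldRetry(&http.Response{StatusCode: http.StatusTooEarly})) // true
	fmt.Println(shouldRetry(&http.Response{StatusCode: 207}))                 // false
}
```
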
Nick Craig-Wood
267eebe5c9 Add --max-connections to control maximum backend concurrency 2025-03-25 15:49:27 +00:00
Nick Craig-Wood
755d72a591 rc: fix debug/* commands not being available over unix sockets
This was caused by an incorrect handler URL which was passing the
debug/* commands to the debug/pprof handler by accident. This only
happened when using unix sockets.
2025-03-25 15:30:49 +00:00
Dan McArdle
4d38424e6c cmd/gitannex: Prevent tests from hanging when assertion fails
This fixes another way that the gitannex tests can hang.

The issue is that our test harness explicitly called `wg.Done()` at the
end of each test case, but when assertions checked with [require] fail,
they halt test execution and prevent `wg.Done()` from happening.

A second issue is that we were incorrectly calling [require] functions
in the goroutine that runs the gitannex server. I found that [require]
calls [testing.T.FailNow] under the hood, which says "FailNow must be
called from the goroutine running the test or benchmark function, not
from other goroutines created during the test." [1]

This commit fixes both issues by replacing the explicit synchronization
with a `chan error`. This enables us to run the gitannex server in a
goroutine, interact with the server in the test's goroutine, and then at
the end use [require] on the test-associated goroutine to ensure the
server's error/nil value matches expectations.

[1]: https://pkg.go.dev/testing#T.FailNow
2025-03-18 12:38:04 +00:00
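
A cut-down illustration of the pattern described above: run the server in a goroutine, report its final error over a channel, and assert on it from the test goroutine (runServer here is a stand-in, not the gitannex code):

```go
package main

import (
	"errors"
	"fmt"
)

func runServer() error {
	// ... serve the protocol; return nil on clean shutdown ...
	return errors.New("simulated server failure")
}

func main() {
	serverErr := make(chan error, 1)
	go func() {
		serverErr <- runServer() // no require calls on this goroutine
	}()

	// ... interact with the server here, on the main/test goroutine ...

	// In the real tests this would be require.NoError(t, <-serverErr),
	// which is safe because it runs on the test goroutine.
	if err := <-serverErr; err != nil {
		fmt.Println("server returned error:", err)
	}
}
```
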
Dan McArdle
53624222c9 cmd/gitannex: Add explicit timeout for mock stdout reads in tests
It seems like (*testState).readLine() hangs indefinitely when it's
waiting for a line that will never be written [1].

This commit adds an explicit 30-second timeout when reading from the
internal mock stdout. Given that we integrate with fstest, this timeout
needs to be sufficiently long that it accommodates slow-but-successful
operations on real remotes.

[1]: https://github.com/rclone/rclone/pull/8423#issuecomment-2701601290
2025-03-18 12:38:04 +00:00
nielash
44e83d77d7 http: correct root if definitely pointing to a file - fixes #8428
This was formalized in
c69eb84573
But it appears that we forgot to update `http`, and the `FsRoot` test didn't
catch it because we don't currently have an http integration test.
2025-03-17 18:05:23 +00:00
Nick Craig-Wood
19aa366d88 pool: add --max-buffer-memory to limit total buffer memory usage 2025-03-17 18:01:15 +00:00
Nick Craig-Wood
3fb4164d87 filter: Add --hash-filter to deterministically select a subset of files
Fixes #8400
2025-03-17 17:25:59 +00:00
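
The general idea behind a deterministic hash-based subset can be sketched as below; the actual hash and normalisation used by --hash-filter may differ:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// inPartition reports whether name falls into partition k of n when the
// file name is hashed deterministically, so repeated runs with the same
// k/n always select the same subset of files.
func inPartition(name string, k, n uint32) bool {
	return crc32.ChecksumIEEE([]byte(name))%n == k
}

func main() {
	for _, f := range []string{"a.txt", "b.txt", "c.txt", "d.txt", "e.txt"} {
		if inPartition(f, 0, 3) {
			fmt.Println("selected:", f)
		}
	}
}
```
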
dependabot[bot]
4e2b78f65d build: update golang.org/x/net to 0.36.0. to fix CVE-2025-22869
SSH servers which implement file transfer protocols are vulnerable to
a denial of service attack from clients which complete the key
exchange slowly, or not at all, causing pending content to be read
into memory, but never transmitted.

This updates golang.org/x/net to fix the problem.

See: https://pkg.go.dev/vuln/GO-2025-3487
See: https://www.cve.org/CVERecord?id=CVE-2025-22869
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-17 17:25:12 +00:00
Nick Craig-Wood
e47f59e1f9 rc: add short parameter to core/stats to not return transferring and checking 2025-03-17 13:44:37 +00:00
Nick Craig-Wood
63c4fef27a fs: fix corruption of SizeSuffix with "B" suffix in config (eg --min-size)
Before this change, the config system round tripped fs.SizeSuffix
values through strings like this, corrupting them in the process.

    "2B" -> 2 -> "2" -> 2048

This caused `--min-size 2B` to be interpreted as `--min-size 2k`.

This fix makes sure SizeSuffix values have a "B" suffix when turned
into a string where necessary, so it becomes

    "2B" -> 2 -> "2B" -> 2

In rclone v2 we should probably declare unsuffixed SizeSuffix values
are in bytes not kBytes (done for rsync compatibility) but this would
be a backwards incompatible change which we don't want for v1.

Fixes #8437
Fixes #8212
Fixes #5169
2025-03-13 09:56:20 +00:00
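
The round trip described above can be reproduced with a toy parser (this is not fs.SizeSuffix itself, just the documented rule that unsuffixed values are kBytes and "B" means bytes):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSize applies the documented rule: unsuffixed values are kBytes,
// a "B" suffix means bytes.
func parseSize(s string) int64 {
	if n, ok := strings.CutSuffix(s, "B"); ok {
		v, _ := strconv.ParseInt(n, 10, 64)
		return v
	}
	v, _ := strconv.ParseInt(s, 10, 64)
	return v * 1024
}

func main() {
	v := parseSize("2B") // 2 bytes
	// Before the fix the config layer re-stringified this without the suffix...
	buggy := strconv.FormatInt(v, 10)
	fmt.Println(parseSize(buggy)) // 2048 - reinterpreted as 2 kBytes
	// After the fix the byte value keeps its "B" suffix when stringified:
	fixed := strconv.FormatInt(v, 10) + "B"
	fmt.Println(parseSize(fixed)) // 2
}
```
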
Nick Craig-Wood
a7a7c1d592 filters: show --min-size and --max-size in --dump filters 2025-03-12 12:32:21 +00:00
Nick Craig-Wood
6a7e68aaf2 build: check docs for edits of autogenerated sections
This adds a lint step which checks the top commit for edits to
autogenerated doc sections.
2025-03-10 22:07:19 +00:00
Nick Craig-Wood
6e7a3795f1 Add jack to contributors 2025-03-10 22:07:19 +00:00
jack
177337686a docs: fix incorrect mentions of vfs-cache-min-free-size 2025-03-09 01:23:42 +01:00
278 changed files with 27591 additions and 4521 deletions

View File

@@ -226,6 +226,8 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Go
id: setup-go
@@ -289,6 +291,10 @@ jobs:
- name: Scan for vulnerabilities
run: govulncheck ./...
- name: Scan edits of autogenerated files
run: bin/check_autogenerated_edits.py
if: github.event_name == 'pull_request'
android:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 30

View File

@@ -572,3 +572,19 @@ Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
## Keeping a backend or command out of tree
Rclone was designed to be modular so it is very easy to keep a backend
or a command out of the main rclone source tree.
So for example if you had a backend which accessed your proprietary
systems or a command which was specialised for your needs you could
add them out of tree.
This may be easier than using a plugin and is supported on all
platforms not just macOS and Linux.
This is explained further in https://github.com/rclone/rclone_out_of_tree_example
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).

MANUAL.html (generated, 3931 changed lines): file diff suppressed because it is too large

MANUAL.md (generated, 3115 changed lines): file diff suppressed because it is too large

MANUAL.txt (generated, 3234 changed lines): file diff suppressed because it is too large

View File

@@ -1,20 +1,4 @@
<div align="center">
<sup>Special thanks to our sponsor:</sup>
<br>
<br>
<a href="https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103">
<div>
<img src="https://rclone.org/img/logos/warp-github.svg" width="300" alt="Warp">
</div>
<b>Warp is a modern, Rust-based terminal with AI built in so you and your team can build great software, faster.</b>
<div>
<sup>Visit warp.dev to learn more.</sup>
</div>
</a>
<br>
<hr>
</div>
<br>
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
@@ -56,6 +40,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* Files.com [:page_facing_up:](https://rclone.org/filescom/)
* FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GoFile [:page_facing_up:](https://rclone.org/gofile/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
@@ -80,7 +65,8 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* MEGA [:page_facing_up:](https://rclone.org/mega/)
* MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
@@ -110,6 +96,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)

View File

@@ -14,10 +14,12 @@ import (
_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/doi"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/filefabric"
_ "github.com/rclone/rclone/backend/filelu"
_ "github.com/rclone/rclone/backend/filescom"
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/gofile"

View File

@@ -44,7 +44,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
@@ -612,6 +612,9 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
containerName, containerPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
if f.opt.DirectoryMarkers && strings.HasSuffix(containerPath, "//") {
containerPath = containerPath[:len(containerPath)-1]
}
return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath)
}
@@ -928,6 +931,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
// User with username and password
//nolint:staticcheck // this is deprecated due to Azure policy
options := azidentity.UsernamePasswordCredentialOptions{
ClientOptions: policyClientOptions,
}
@@ -1213,7 +1217,7 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
continue
}
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
remote, _ = strings.CutSuffix(remote, "/")
}
remote = remote[len(prefix):]
if addContainer {
@@ -1378,7 +1382,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
containerName, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(containerName, directory, prefix string, addContainer bool) error {
return f.list(ctx, containerName, directory, prefix, addContainer, true, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -1534,7 +1538,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
remote, _ = strings.CutSuffix(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
@@ -1768,7 +1772,7 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
var (
srcSize = src.size
partSize = int64(chunksize.Calculator(o, src.size, blockblob.MaxBlocks, f.opt.ChunkSize))
numParts = (srcSize-1)/partSize + 1
numParts = (srcSize + partSize - 1) / partSize
blockIDs = make([]string, numParts) // list of blocks for finalize
g, gCtx = errgroup.WithContext(ctx)
checker = newCheckForInvalidBlockOrBlob("copy", o)
@@ -2176,11 +2180,6 @@ func (o *Object) getTags() (tags map[string]string) {
// getBlobSVC creates a blob client
func (o *Object) getBlobSVC() *blob.Client {
container, directory := o.split()
// If we are trying to remove an all / directory marker then
// this will have one / too many now.
if bucket.IsAllSlashes(o.remote) {
directory = strings.TrimSuffix(directory, "/")
}
return o.fs.getBlobSVC(container, directory)
}
@@ -2863,6 +2862,9 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
return ui, err
}
}
// if ui.isDirMarker && strings.HasSuffix(containerPath, "//") {
// containerPath = containerPath[:len(containerPath)-1]
// }
// Update Mod time
o.updateMetadataWithModTime(src.ModTime(ctx))

View File

@@ -516,6 +516,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
}
case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
// User with username and password
//nolint:staticcheck // this is deprecated due to Azure policy
options := azidentity.UsernamePasswordCredentialOptions{
ClientOptions: policyClientOptions,
}

View File

@@ -31,8 +31,8 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
@@ -918,7 +918,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
last := ""
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
@@ -1883,9 +1883,14 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
// --b2-download-url cloudflare strips the Content-Length
// headers (presumably so it can inject stuff) so use the old
// length read from the listing.
// Additionally, the official examples return S3 headers
// instead of native, i.e. no file ID, use ones from listing.
if info.Size < 0 {
info.Size = o.size
}
if info.ID == "" {
info.ID = o.id
}
return resp, info, nil
}

View File

@@ -29,6 +29,7 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
@@ -1086,7 +1087,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return cachedEntries, nil
}
func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
entries, err := f.List(ctx, dir)
if err != nil {
return err
@@ -1138,7 +1139,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
// if we're here, we're gonna do a standard recursive traversal and cache everything
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
err = f.recurse(ctx, dir, list)
if err != nil {
return err

View File

@@ -17,7 +17,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache

View File

@@ -356,7 +356,8 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
DirModTimeUpdatesOnWrite: true,
}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
f.features.Disable("ListR") // Recursive listing may cause chunker skip files
f.features.ListR = nil // Recursive listing may cause chunker skip files
f.features.ListP = nil // ListP not supported yet
return f, err
}
@@ -1860,6 +1861,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// baseMove chains to the wrapped Move or simulates it by Copy+Delete
func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) {
ctx, ci := fs.AddConfig(ctx)
ci.NameTransform = nil // ensure operations.Move does not double-transform here
var (
dest fs.Object
err error

View File

@@ -46,6 +46,7 @@ func TestIntegration(t *testing.T) {
"DirCacheFlush",
"UserInfo",
"Disconnect",
"ListP",
},
}
if *fstest.RemoteName == "" {

View File

@@ -18,7 +18,7 @@ type CloudinaryEncoder interface {
ToStandardPath(string) string
// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
ToStandardName(string) string
ToStandardName(string, string) string
// Encoded root of the remote (as passed into NewFs)
FromStandardFullPath(string) string
}

View File

@@ -8,7 +8,9 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"path"
"slices"
"strconv"
"strings"
"time"
@@ -103,19 +105,39 @@ func init() {
Advanced: true,
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
},
{
Name: "adjust_media_files_extensions",
Default: true,
Advanced: true,
Help: "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
},
{
Name: "media_extensions",
Default: []string{
"3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
"cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
"glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
"jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
"mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
"tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
Advanced: true,
Help: "Cloudinary supported media extensions",
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
MediaExtensions []string `config:"media_extensions"`
AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"`
}
// Fs represents a remote cloudinary server
@@ -203,6 +225,18 @@ func (f *Fs) FromStandardPath(s string) string {
// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(s)
ext := ""
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
s = strings.TrimSuffix(parsedURL.Path, ext)
}
}
}
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}
@@ -212,8 +246,20 @@ func (f *Fs) ToStandardPath(s string) string {
}
// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&")
func (f *Fs) ToStandardName(s string, assetURL string) string {
ext := ""
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(assetURL)
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
ext = ""
}
}
}
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
}
// FromStandardFullPath encodes a full path to Cloudinary standard
@@ -331,10 +377,7 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
}
for _, asset := range results.Assets {
remote := api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName)
if dir != "" {
remote = path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName))
}
remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
o := &Object{
fs: f,
remote: remote,

View File

@@ -20,6 +20,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"golang.org/x/sync/errgroup"
@@ -265,6 +266,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}
// Enable ListP always
features.ListP = f.ListP
// Enable Purge when any upstreams support it
if features.Purge == nil {
for _, u := range f.upstreams {
@@ -809,24 +813,52 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
if f.root == "" && dir == "" {
entries = make(fs.DirEntries, 0, len(f.upstreams))
entries := make(fs.DirEntries, 0, len(f.upstreams))
for combineDir := range f.upstreams {
d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
entries = append(entries, d)
}
return entries, nil
return callback(entries)
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return nil, err
return err
}
entries, err = u.f.List(ctx, uRemote)
if err != nil {
return nil, err
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := u.wrapEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
}
return u.wrapEntries(ctx, entries)
listP := u.f.Features().ListP
if listP == nil {
entries, err := u.f.List(ctx, uRemote)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
}
// ListR lists the objects and directories of the Fs starting

View File

@@ -29,6 +29,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
@@ -208,6 +209,8 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
if !operations.CanServerSideMove(wrappedFs) {
f.features.Disable("PutStream")
}
// Enable ListP always
f.features.ListP = f.ListP
return f, err
}
@@ -352,11 +355,39 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
// found.
// List entries and process them
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, err = f.Fs.List(ctx, dir)
if err != nil {
return nil, err
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.processEntries(entries)
if err != nil {
return err
}
return callback(entries)
}
return f.processEntries(entries)
listP := f.Fs.Features().ListP
if listP == nil {
entries, err := f.Fs.List(ctx, dir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
}
// ListR lists the objects and directories of the Fs starting

View File

@@ -18,6 +18,7 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
)
// Globals
@@ -293,6 +294,9 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// Enable ListP always
f.features.ListP = f.ListP
return f, err
}
@@ -416,11 +420,40 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
if err != nil {
return nil, err
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.encryptEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
}
return f.encryptEntries(ctx, entries)
listP := f.Fs.Features().ListP
encryptedDir := f.cipher.EncryptDirName(dir)
if listP == nil {
entries, err := f.Fs.List(ctx, encryptedDir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, encryptedDir, wrappedCallback)
}
// ListR lists the objects and directories of the Fs starting

View File

@@ -0,0 +1,38 @@
// Type definitions specific to Dataverse
package api
// DataverseDatasetResponse is returned by the Dataverse dataset API
type DataverseDatasetResponse struct {
Status string `json:"status"`
Data DataverseDataset `json:"data"`
}
// DataverseDataset is the representation of a dataset
type DataverseDataset struct {
LatestVersion DataverseDatasetVersion `json:"latestVersion"`
}
// DataverseDatasetVersion is the representation of a dataset version
type DataverseDatasetVersion struct {
LastUpdateTime string `json:"lastUpdateTime"`
Files []DataverseFile `json:"files"`
}
// DataverseFile is the representation of a file found in a dataset
type DataverseFile struct {
DirectoryLabel string `json:"directoryLabel"`
DataFile DataverseDataFile `json:"dataFile"`
}
// DataverseDataFile represents file metadata details
type DataverseDataFile struct {
ID int64 `json:"id"`
Filename string `json:"filename"`
ContentType string `json:"contentType"`
FileSize int64 `json:"filesize"`
OriginalFileFormat string `json:"originalFileFormat"`
OriginalFileSize int64 `json:"originalFileSize"`
OriginalFileName string `json:"originalFileName"`
MD5 string `json:"md5"`
}

View File

@@ -0,0 +1,33 @@
// Type definitions specific to InvenioRDM
package api
// InvenioRecordResponse is the representation of a record stored in InvenioRDM
type InvenioRecordResponse struct {
Links InvenioRecordResponseLinks `json:"links"`
}
// InvenioRecordResponseLinks represents a record's links
type InvenioRecordResponseLinks struct {
Self string `json:"self"`
}
// InvenioFilesResponse is the representation of a record's files
type InvenioFilesResponse struct {
Entries []InvenioFilesResponseEntry `json:"entries"`
}
// InvenioFilesResponseEntry is the representation of a file entry
type InvenioFilesResponseEntry struct {
Key string `json:"key"`
Checksum string `json:"checksum"`
Size int64 `json:"size"`
Updated string `json:"updated"`
MimeType string `json:"mimetype"`
Links InvenioFilesResponseEntryLinks `json:"links"`
}
// InvenioFilesResponseEntryLinks represents file links details
type InvenioFilesResponseEntryLinks struct {
Content string `json:"content"`
}

26
backend/doi/api/types.go Normal file
View File

@@ -0,0 +1,26 @@
// Package api has general type definitions for doi
package api
// DoiResolverResponse is returned by the DOI resolver API
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
type DoiResolverResponse struct {
ResponseCode int `json:"responseCode"`
Handle string `json:"handle"`
Values []DoiResolverResponseValue `json:"values"`
}
// DoiResolverResponseValue is a single handle record value
type DoiResolverResponseValue struct {
Index int `json:"index"`
Type string `json:"type"`
Data DoiResolverResponseValueData `json:"data"`
TTL int `json:"ttl"`
Timestamp string `json:"timestamp"`
}
// DoiResolverResponseValueData is the data held in a handle value
type DoiResolverResponseValueData struct {
Format string `json:"format"`
Value any `json:"value"`
}

112
backend/doi/dataverse.go Normal file
View File

@@ -0,0 +1,112 @@
// Implementation for Dataverse
package doi
import (
"context"
"fmt"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
// activateDataverse returns true if resolvedURL is likely a DOI hosted on a Dataverse installation
func activateDataverse(resolvedURL *url.URL) (isActive bool) {
queryValues := resolvedURL.Query()
persistentID := queryValues.Get("persistentId")
return persistentID != ""
}
// Resolve the main API endpoint for a DOI hosted on a Dataverse installation
func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
queryValues := resolvedURL.Query()
persistentID := queryValues.Get("persistentId")
query := url.Values{}
query.Add("persistentId", persistentID)
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})
return Dataverse, endpointURL, nil
}
// dataverseProvider implements the doiProvider interface for Dataverse installations
type dataverseProvider struct {
f *Fs
}
// ListEntries returns the full list of entries found at the remote, regardless of root
func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
// Use the cache if populated
cachedEntries, found := dp.f.cache.GetMaybe("files")
if found {
parsedEntries, ok := cachedEntries.([]Object)
if ok {
for _, entry := range parsedEntries {
newEntry := entry
entries = append(entries, &newEntry)
}
return entries, nil
}
}
filesURL := dp.f.endpoint
var res *http.Response
var result api.DataverseDatasetResponse
opts := rest.Opts{
Method: "GET",
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
Parameters: filesURL.Query(),
}
err = dp.f.pacer.Call(func() (bool, error) {
res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("readDir failed: %w", err)
}
modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
if modTimeErr != nil {
fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
modTime = timeUnset
}
for _, file := range result.Data.LatestVersion.Files {
contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
query := url.Values{}
query.Add("format", "original")
contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
entry := &Object{
fs: dp.f,
remote: path.Join(file.DirectoryLabel, file.DataFile.Filename),
contentURL: contentURL.String(),
size: file.DataFile.FileSize,
modTime: modTime,
md5: file.DataFile.MD5,
contentType: file.DataFile.ContentType,
}
if file.DataFile.OriginalFileName != "" {
entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
entry.size = file.DataFile.OriginalFileSize
entry.contentType = file.DataFile.OriginalFileFormat
}
entries = append(entries, entry)
}
// Populate the cache
cacheEntries := []Object{}
for _, entry := range entries {
cacheEntries = append(cacheEntries, *entry)
}
dp.f.cache.Put("files", cacheEntries)
return entries, nil
}
func newDataverseProvider(f *Fs) doiProvider {
return &dataverseProvider{
f: f,
}
}

649
backend/doi/doi.go Normal file
View File

@@ -0,0 +1,649 @@
// Package doi provides a filesystem interface for digital objects identified by DOIs.
//
// See: https://www.doi.org/the-identifier/what-is-a-doi/
package doi
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/cache"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
// the URL of the DOI resolver
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
doiResolverAPIURL = "https://doi.org/api"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
var (
errorReadOnly = errors.New("doi remotes are read only")
timeUnset = time.Unix(0, 0)
)
func init() {
fsi := &fs.RegInfo{
Name: "doi",
Description: "DOI datasets",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "doi",
Help: "The DOI or the doi.org URL.",
Required: true,
}, {
Name: fs.ConfigProvider,
Help: `DOI provider.
The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
Examples: []fs.OptionExample{
{
Value: "auto",
Help: "Auto-detect provider",
},
{
Value: string(Zenodo),
Help: "Zenodo",
}, {
Value: string(Dataverse),
Help: "Dataverse",
}, {
Value: string(Invenio),
Help: "Invenio",
}},
Required: false,
Advanced: true,
}, {
Name: "doi_resolver_api_url",
Help: `The URL of the DOI resolver API to use.
The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.
Defaults to "https://doi.org/api".`,
Required: false,
Advanced: true,
}},
}
fs.Register(fsi)
}
// Provider defines the type of provider hosting the DOI
type Provider string
const (
// Zenodo provider, see https://zenodo.org
Zenodo Provider = "zenodo"
// Dataverse provider, see https://dataverse.harvard.edu
Dataverse Provider = "dataverse"
// Invenio provider, see https://inveniordm.docs.cern.ch
Invenio Provider = "invenio"
)
// Options defines the configuration for this backend
type Options struct {
Doi string `config:"doi"` // The DOI, a digital identifier of an object, usually a dataset
Provider string `config:"provider"` // The DOI provider
DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use.
}
// Fs stores the interface to the remote HTTP files
type Fs struct {
name string // name of this remote
root string // the path we are working on
provider Provider // the DOI provider
doiProvider doiProvider // the interface used to interact with the DOI provider
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
endpoint *url.URL // the main API endpoint for this remote
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the server
pacer *fs.Pacer // pacer for API calls
cache *cache.Cache // a cache for the remote metadata
}
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
fs *Fs // what this object is part of
remote string // the remote path
contentURL string // the URL where the contents of the file can be downloaded
size int64 // size of the object
modTime time.Time // modification time of the object
contentType string // content type of the object
md5 string // MD5 hash of the object content
}
// doiProvider is the interface used to list objects in a DOI
type doiProvider interface {
// ListEntries returns the full list of entries found at the remote, regardless of root
ListEntries(ctx context.Context) (entries []*Object, err error)
}
// Parse the input string as a DOI
// Examples:
// 10.1000/182 -> 10.1000/182
// https://doi.org/10.1000/182 -> 10.1000/182
// doi:10.1000/182 -> 10.1000/182
func parseDoi(doi string) string {
doiURL, err := url.Parse(doi)
if err != nil {
return doi
}
if doiURL.Scheme == "doi" {
return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
}
if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
return strings.TrimLeft(doiURL.Path, "/")
}
return doi
}
// Resolve a DOI to a URL
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
resolverURL := opt.DoiResolverAPIURL
if resolverURL == "" {
resolverURL = doiResolverAPIURL
}
var result api.DoiResolverResponse
params := url.Values{}
params.Add("index", "1")
opts := rest.Opts{
Method: "GET",
RootURL: resolverURL,
Path: "/handles/" + opt.Doi,
Parameters: params,
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, err
}
if result.ResponseCode != 1 {
return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
}
resolvedURLStr := ""
for _, value := range result.Values {
if value.Type == "URL" && value.Data.Format == "string" {
valueStr, ok := value.Data.Value.(string)
if !ok {
return nil, fmt.Errorf("could not resolve DOI (incorrect response format)")
}
resolvedURLStr = valueStr
}
}
resolvedURL, err := url.Parse(resolvedURLStr)
if err != nil {
return nil, err
}
return resolvedURL, nil
}
// Resolve the passed configuration into a provider and endpoint
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
if err != nil {
return "", nil, err
}
switch opt.Provider {
case string(Dataverse):
return resolveDataverseEndpoint(resolvedURL)
case string(Invenio):
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
case string(Zenodo):
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
}
hostname := strings.ToLower(resolvedURL.Hostname())
if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
return resolveDataverseEndpoint(resolvedURL)
}
if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
}
if activateInvenio(ctx, srv, pacer, resolvedURL) {
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
}
return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
}
// Make the http connection from the passed options
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
if err != nil {
return false, err
}
// Update f with the new parameters
f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
f.endpoint = endpoint
f.endpointURL = endpoint.String()
f.provider = provider
f.opt.Provider = string(provider)
switch f.provider {
case Dataverse:
f.doiProvider = newDataverseProvider(f)
case Invenio, Zenodo:
f.doiProvider = newInvenioProvider(f)
default:
return false, fmt.Errorf("provider type '%s' not supported", f.provider)
}
// Determine if the root is a file
entries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return false, err
}
for _, entry := range entries {
if entry.remote == f.root {
isFile = true
break
}
}
return isFile, nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this res and err
// deserve to be retried. It returns the err as a convenience.
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
root = strings.Trim(root, "/")
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
opt.Doi = parseDoi(opt.Doi)
client := fshttp.NewClient(ctx)
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
srv: rest.NewClient(client),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
cache: cache.New(),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
isFile, err := f.httpConnection(ctx, opt)
if err != nil {
return nil, err
}
if isFile {
// return an error with an fs which points to the parent
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.root = newRoot
return f, fs.ErrorIsFile
}
return f, nil
}
// Name returns the configured name of the file system
func (f *Fs) Name() string {
return f.name
}
// Root returns the root for the filesystem
func (f *Fs) Root() string {
return f.root
}
// String returns the URL for the filesystem
func (f *Fs) String() string {
return fmt.Sprintf("DOI %s", f.opt.Doi)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Hashes returns hash.MD5 to indicate that MD5 hashes are supported
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return errorReadOnly
}
// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
return errorReadOnly
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return errorReadOnly
}
// NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
entries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return nil, err
}
remoteFullPath := remote
if f.root != "" {
remoteFullPath = path.Join(f.root, remote)
}
for _, entry := range entries {
if entry.Remote() == remoteFullPath {
return entry, nil
}
}
return nil, fs.ErrorObjectNotFound
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
fileEntries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return nil, fmt.Errorf("error listing %q: %w", dir, err)
}
fullDir := path.Join(f.root, dir)
if fullDir != "" {
fullDir += "/"
}
dirPaths := map[string]bool{}
for _, entry := range fileEntries {
// First, filter out files not in `fullDir`
if !strings.HasPrefix(entry.remote, fullDir) {
continue
}
// Then, find entries in subfolders
remotePath := entry.remote
if fullDir != "" {
remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
}
parts := strings.SplitN(remotePath, "/", 2)
if len(parts) == 1 {
newEntry := *entry
newEntry.remote = path.Join(dir, remotePath)
entries = append(entries, &newEntry)
} else {
dirPaths[path.Join(dir, parts[0])] = true
}
}
for dirPath := range dirPaths {
entry := fs.NewDir(dirPath, time.Time{})
entries = append(entries, entry)
}
return entries, nil
}
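To make the filtering above concrete, a tiny standalone sketch of the same prefix/SplitN split that decides whether an entry is surfaced as a file or as a synthetic directory; the sample paths are made up, not taken from a real DOI.
package main

import (
	"fmt"
	"strings"
)

func main() {
	fullDir := "data/" // listing dir "data" with an empty root
	for _, remote := range []string{"README.md", "data/a.txt", "data/sub/b.txt"} {
		if !strings.HasPrefix(remote, fullDir) {
			continue // README.md is outside data/
		}
		rel := strings.TrimPrefix(remote, fullDir)
		parts := strings.SplitN(rel, "/", 2)
		if len(parts) == 1 {
			fmt.Println("file:", parts[0]) // data/a.txt -> a.txt
		} else {
			fmt.Println("dir:", parts[0]) // data/sub/b.txt -> sub
		}
	}
}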
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
return o.fs
}
// String returns the remote path of the file object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path of the file, relative to the fs root
func (o *Object) Remote() string {
return o.remote
}
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5, nil
}
// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return errorReadOnly
}
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
return true
}
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
opts := rest.Opts{
Method: "GET",
RootURL: o.contentURL,
Options: options,
}
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
// Handle non-compliant redirects
if res.Header.Get("Location") != "" {
newURL, err := res.Location()
if err == nil {
opts.RootURL = newURL.String()
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
}
}
return res.Body, nil
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errorReadOnly
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.contentType
}
var commandHelp = []fs.CommandHelp{{
Name: "metadata",
Short: "Show metadata about the DOI.",
Long: `This command returns a JSON object with some information about the DOI.
rclone backend metadata doi:
It returns a JSON object representing metadata about the DOI.
`,
}, {
Name: "set",
Short: "Set command for updating the config parameters.",
Long: `This set command can be used to update the config parameters
for a running doi backend.
Usage Examples:
rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
The option keys are named as they are in the config file.
This rebuilds the connection to the doi backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.
It doesn't return anything.
`,
}}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "metadata":
return f.ShowMetadata(ctx)
case "set":
newOpt := f.opt
err := configstruct.Set(configmap.Simple(opt), &newOpt)
if err != nil {
return nil, fmt.Errorf("reading config: %w", err)
}
_, err = f.httpConnection(ctx, &newOpt)
if err != nil {
return nil, fmt.Errorf("updating session: %w", err)
}
f.opt = newOpt
keys := []string{}
for k := range opt {
keys = append(keys, k)
}
fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
if err != nil {
return nil, err
}
info := map[string]any{}
info["DOI"] = f.opt.Doi
info["URL"] = doiURL.String()
info["metadataURL"] = f.endpointURL
info["provider"] = f.provider
return info, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)

View File

@@ -0,0 +1,260 @@
package doi
import (
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"net/url"
"sort"
"strings"
"testing"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var remoteName = "TestDoi"
func TestParseDoi(t *testing.T) {
// 10.1000/182 -> 10.1000/182
doi := "10.1000/182"
parsed := parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// https://doi.org/10.1000/182 -> 10.1000/182
doi = "https://doi.org/10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// https://dx.doi.org/10.1000/182 -> 10.1000/182
doi = "https://dxdoi.org/10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// doi:10.1000/182 -> 10.1000/182
doi = "doi:10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// doi://10.1000/182 -> 10.1000/182
doi = "doi://10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
}
// prepareMockDoiResolverServer prepares a test server to resolve DOIs
func prepareMockDoiResolverServer(t *testing.T, resolvedURL string) (doiResolverAPIURL string) {
mux := http.NewServeMux()
// Handle requests for resolving DOIs
mux.HandleFunc("GET /api/handles/{handle...}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are resolving a DOI
handle := strings.TrimPrefix(r.URL.Path, "/api/handles/")
assert.NotEmpty(t, handle)
index := r.URL.Query().Get("index")
assert.Equal(t, "1", index)
// Return the most basic response
result := api.DoiResolverResponse{
ResponseCode: 1,
Handle: handle,
Values: []api.DoiResolverResponseValue{
{
Index: 1,
Type: "URL",
Data: api.DoiResolverResponseValueData{
Format: "string",
Value: resolvedURL,
},
},
},
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Make the test server
ts := httptest.NewServer(mux)
// Close the server at the end of the test
t.Cleanup(ts.Close)
return ts.URL + "/api"
}
func md5Sum(text string) string {
hash := md5.Sum([]byte(text))
return hex.EncodeToString(hash[:])
}
// prepareMockZenodoServer prepares a test server that mocks Zenodo.org
func prepareMockZenodoServer(t *testing.T, files map[string]string) *httptest.Server {
mux := http.NewServeMux()
// Handle requests for a single record
mux.HandleFunc("GET /api/records/{recordID...}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are returning data about a single record
recordID := strings.TrimPrefix(r.URL.Path, "/api/records/")
assert.NotEmpty(t, recordID)
// Return the most basic response
selfURL, err := url.Parse("http://" + r.Host)
require.NoError(t, err)
selfURL = selfURL.JoinPath(r.URL.String())
result := api.InvenioRecordResponse{
Links: api.InvenioRecordResponseLinks{
Self: selfURL.String(),
},
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Handle requests for listing files in a record
mux.HandleFunc("GET /api/records/{record}/files", func(w http.ResponseWriter, r *http.Request) {
// Return the most basic response
filesBaseURL, err := url.Parse("http://" + r.Host)
require.NoError(t, err)
filesBaseURL = filesBaseURL.JoinPath("/api/files/")
entries := []api.InvenioFilesResponseEntry{}
for filename, contents := range files {
entries = append(entries,
api.InvenioFilesResponseEntry{
Key: filename,
Checksum: md5Sum(contents),
Size: int64(len(contents)),
Updated: time.Now().UTC().Format(time.RFC3339),
MimeType: "text/plain; charset=utf-8",
Links: api.InvenioFilesResponseEntryLinks{
Content: filesBaseURL.JoinPath(filename).String(),
},
},
)
}
result := api.InvenioFilesResponse{
Entries: entries,
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Handle requests for file contents
mux.HandleFunc("/api/files/{file}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are returning the contents of a file
filename := strings.TrimPrefix(r.URL.Path, "/api/files/")
assert.NotEmpty(t, filename)
contents, found := files[filename]
if !found {
w.WriteHeader(404)
return
}
// Return the most basic response
_, err := w.Write([]byte(contents))
require.NoError(t, err)
})
// Make the test server
ts := httptest.NewServer(mux)
// Close the server at the end of the test
t.Cleanup(ts.Close)
return ts
}
func TestZenodoRemote(t *testing.T) {
recordID := "2600782"
doi := "10.5281/zenodo.2600782"
// The files in the dataset
files := map[string]string{
"README.md": "This is a dataset.",
"data.txt": "Some data",
}
ts := prepareMockZenodoServer(t, files)
resolvedURL := ts.URL + "/record/" + recordID
doiResolverAPIURL := prepareMockDoiResolverServer(t, resolvedURL)
testConfig := configmap.Simple{
"type": "doi",
"doi": doi,
"provider": "zenodo",
"doi_resolver_api_url": doiResolverAPIURL,
}
f, err := NewFs(context.Background(), remoteName, "", testConfig)
require.NoError(t, err)
// Test listing the DOI files
entries, err := f.List(context.Background(), "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, len(files), len(entries))
e := entries[0]
assert.Equal(t, "README.md", e.Remote())
assert.Equal(t, int64(18), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
e = entries[1]
assert.Equal(t, "data.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
// Test reading the DOI files
o, err := f.NewObject(context.Background(), "README.md")
require.NoError(t, err)
assert.Equal(t, int64(18), o.Size())
md5Hash, err := o.Hash(context.Background(), hash.MD5)
require.NoError(t, err)
assert.Equal(t, "464352b1cab5240e44528a56fda33d9d", md5Hash)
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err := io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, []byte(files["README.md"]), data)
do, ok := o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
o, err = f.NewObject(context.Background(), "data.txt")
require.NoError(t, err)
assert.Equal(t, int64(9), o.Size())
md5Hash, err = o.Hash(context.Background(), hash.MD5)
require.NoError(t, err)
assert.Equal(t, "5b82f8bf4df2bfb0e66ccaa7306fd024", md5Hash)
fd, err = o.Open(context.Background())
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, []byte(files["data.txt"]), data)
do, ok = o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
}

16
backend/doi/doi_test.go Normal file
View File

@@ -0,0 +1,16 @@
// Test DOI filesystem interface
package doi
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDoi:",
NilObject: (*Object)(nil),
})
}

164
backend/doi/invenio.go Normal file
View File

@@ -0,0 +1,164 @@
// Implementation for InvenioRDM
package doi
import (
"context"
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
var invenioRecordRegex = regexp.MustCompile(`\/records?\/(.+)`)
// activateInvenio returns true if resolvedURL is likely a DOI hosted on an InvenioRDM installation
func activateInvenio(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (isActive bool) {
_, _, err := resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
return err == nil
}
// Resolve the main API endpoint for a DOI hosted on an InvenioRDM installation
func resolveInvenioEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
var res *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: resolvedURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err = srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return "", nil, err
}
// First, attempt to grab the API URL from the headers
var linksetURL *url.URL
links := parseLinkHeader(res.Header.Get("Link"))
for _, link := range links {
if link.Rel == "linkset" && link.Type == "application/linkset+json" {
parsed, err := url.Parse(link.Href)
if err == nil {
linksetURL = parsed
break
}
}
}
if linksetURL != nil {
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, linksetURL)
if err == nil {
return Invenio, endpoint, nil
}
fs.Logf(nil, "using linkset URL failed: %s", err.Error())
}
// If there is no linkset header, try to grab the record ID from the URL
recordID := ""
resURL := res.Request.URL
match := invenioRecordRegex.FindStringSubmatch(resURL.EscapedPath())
if match != nil {
recordID = match[1]
guessedURL := res.Request.URL.ResolveReference(&url.URL{
Path: "/api/records/" + recordID,
})
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, guessedURL)
if err == nil {
return Invenio, endpoint, nil
}
fs.Logf(nil, "guessing the URL failed: %s", err.Error())
}
return "", nil, fmt.Errorf("could not resolve the Invenio API endpoint for '%s'", resolvedURL.String())
}
func checkInvenioAPIURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (endpoint *url.URL, err error) {
var result api.InvenioRecordResponse
opts := rest.Opts{
Method: "GET",
RootURL: resolvedURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, err
}
if result.Links.Self == "" {
return nil, fmt.Errorf("could not parse API response from '%s'", resolvedURL.String())
}
return url.Parse(result.Links.Self)
}
// invenioProvider implements the doiProvider interface for InvenioRDM installations
type invenioProvider struct {
f *Fs
}
// ListEntries returns the full list of entries found at the remote, regardless of root
func (ip *invenioProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
// Use the cache if populated
cachedEntries, found := ip.f.cache.GetMaybe("files")
if found {
parsedEntries, ok := cachedEntries.([]Object)
if ok {
for _, entry := range parsedEntries {
newEntry := entry
entries = append(entries, &newEntry)
}
return entries, nil
}
}
filesURL := ip.f.endpoint.JoinPath("files")
var result api.InvenioFilesResponse
opts := rest.Opts{
Method: "GET",
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
}
err = ip.f.pacer.Call(func() (bool, error) {
res, err := ip.f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("readDir failed: %w", err)
}
for _, file := range result.Entries {
modTime, modTimeErr := time.Parse(time.RFC3339, file.Updated)
if modTimeErr != nil {
fs.Logf(ip.f, "error: could not parse last update time %v", modTimeErr)
modTime = timeUnset
}
entry := &Object{
fs: ip.f,
remote: file.Key,
contentURL: file.Links.Content,
size: file.Size,
modTime: modTime,
contentType: file.MimeType,
md5: strings.TrimPrefix(file.Checksum, "md5:"),
}
entries = append(entries, entry)
}
// Populate the cache
cacheEntries := []Object{}
for _, entry := range entries {
cacheEntries = append(cacheEntries, *entry)
}
ip.f.cache.Put("files", cacheEntries)
return entries, nil
}
func newInvenioProvider(f *Fs) doiProvider {
return &invenioProvider{
f: f,
}
}

View File

@@ -0,0 +1,75 @@
package doi
import (
"regexp"
"strings"
)
var linkRegex = regexp.MustCompile(`^<(.+)>$`)
var valueRegex = regexp.MustCompile(`^"(.+)"$`)
// headerLink represents a link as presented in HTTP headers
// MDN Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Link
type headerLink struct {
Href string
Rel string
Type string
Extras map[string]string
}
func parseLinkHeader(header string) (links []headerLink) {
for _, link := range strings.Split(header, ",") {
link = strings.TrimSpace(link)
parsed := parseLink(link)
if parsed != nil {
links = append(links, *parsed)
}
}
return links
}
func parseLink(link string) (parsedLink *headerLink) {
var parts []string
for _, part := range strings.Split(link, ";") {
parts = append(parts, strings.TrimSpace(part))
}
match := linkRegex.FindStringSubmatch(parts[0])
if match == nil {
return nil
}
result := &headerLink{
Href: match[1],
Extras: map[string]string{},
}
for _, keyValue := range parts[1:] {
parsed := parseKeyValue(keyValue)
if parsed != nil {
key, value := parsed[0], parsed[1]
switch strings.ToLower(key) {
case "rel":
result.Rel = value
case "type":
result.Type = value
default:
result.Extras[key] = value
}
}
}
return result
}
func parseKeyValue(keyValue string) []string {
parts := strings.SplitN(keyValue, "=", 2)
if parts[0] == "" || len(parts) < 2 {
return nil
}
match := valueRegex.FindStringSubmatch(parts[1])
if match != nil {
parts[1] = match[1]
return parts
}
return parts
}

View File

@@ -0,0 +1,44 @@
package doi
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestParseLinkHeader(t *testing.T) {
header := "<https://zenodo.org/api/records/15063252> ; rel=\"linkset\" ; type=\"application/linkset+json\""
links := parseLinkHeader(header)
expected := headerLink{
Href: "https://zenodo.org/api/records/15063252",
Rel: "linkset",
Type: "application/linkset+json",
Extras: map[string]string{},
}
assert.Contains(t, links, expected)
header = "<https://api.example.com/issues?page=2>; rel=\"prev\", <https://api.example.com/issues?page=4>; rel=\"next\", <https://api.example.com/issues?page=10>; rel=\"last\", <https://api.example.com/issues?page=1>; rel=\"first\""
links = parseLinkHeader(header)
expectedList := []headerLink{{
Href: "https://api.example.com/issues?page=2",
Rel: "prev",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=4",
Rel: "next",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=10",
Rel: "last",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=1",
Rel: "first",
Type: "",
Extras: map[string]string{},
}}
assert.Equal(t, links, expectedList)
}

47
backend/doi/zenodo.go Normal file
View File

@@ -0,0 +1,47 @@
// Implementation for Zenodo
package doi
import (
"context"
"fmt"
"net/url"
"regexp"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
var zenodoRecordRegex = regexp.MustCompile(`zenodo[.](.+)`)
// Resolve the main API endpoint for a DOI hosted on Zenodo
func resolveZenodoEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL, doi string) (provider Provider, endpoint *url.URL, err error) {
match := zenodoRecordRegex.FindStringSubmatch(doi)
if match == nil {
return "", nil, fmt.Errorf("could not derive API endpoint URL from '%s'", resolvedURL.String())
}
recordID := match[1]
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/records/" + recordID})
var result api.InvenioRecordResponse
opts := rest.Opts{
Method: "GET",
RootURL: endpointURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return "", nil, err
}
endpointURL, err = url.Parse(result.Links.Self)
if err != nil {
return "", nil, err
}
return Zenodo, endpointURL, nil
}
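As a quick check of the record ID extraction above, a small standalone sketch applying the same regexp to the DOI used in the internal tests earlier in this change; the printed record ID is what gets appended to /api/records/.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	zenodoRecordRegex := regexp.MustCompile(`zenodo[.](.+)`)
	match := zenodoRecordRegex.FindStringSubmatch("10.5281/zenodo.2600782")
	if match != nil {
		fmt.Println(match[1]) // 2600782 -> /api/records/2600782
	}
}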

View File

@@ -38,8 +38,8 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
@@ -1745,7 +1745,7 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
}
var updateMetadata updateMetadataFn
if len(metadata) > 0 {
updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true, true)
if err != nil {
return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
}
@@ -1776,7 +1776,7 @@ func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata)
}
dirID = actualID(dirID)
updateInfo := &drive.File{}
updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true, true)
if err != nil {
return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
}
@@ -2189,7 +2189,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
wg := sync.WaitGroup{}
in := make(chan listREntry, listRInputBuffer)
out := make(chan error, f.ci.Checkers)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
overflow := []listREntry{}
listed := 0

View File

@@ -507,7 +507,7 @@ type updateMetadataFn func(context.Context, *drive.File) error
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
callbackFns := []updateMetadataFn{}
callback = func(ctx context.Context, info *drive.File) error {
for _, fn := range callbackFns {
@@ -532,7 +532,9 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
}
switch k {
case "copy-requires-writer-permission":
if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
if isFolder {
fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
return nil, err
}
case "writers-can-share":
@@ -629,7 +631,7 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
if err != nil {
return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
}
callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
if err != nil {
return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
}

View File

@@ -0,0 +1,81 @@
// Package api defines types for interacting with the FileLu API.
package api
import "encoding/json"
// CreateFolderResponse represents the response for creating a folder.
type CreateFolderResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
FldID interface{} `json:"fld_id"`
} `json:"result"`
}
// DeleteFolderResponse represents the response for deleting a folder.
type DeleteFolderResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
}
// FolderListResponse represents the response for listing folders.
type FolderListResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
Files []struct {
Name string `json:"name"`
FldID json.Number `json:"fld_id"`
Path string `json:"path"`
FileCode string `json:"file_code"`
Size int64 `json:"size"`
} `json:"files"`
Folders []struct {
Name string `json:"name"`
FldID json.Number `json:"fld_id"`
Path string `json:"path"`
} `json:"folders"`
} `json:"result"`
}
// FileDirectLinkResponse represents the response for a direct link to a file.
type FileDirectLinkResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
URL string `json:"url"`
Size int64 `json:"size"`
} `json:"result"`
}
// FileInfoResponse represents the response for file information.
type FileInfoResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result []struct {
Size string `json:"size"`
Name string `json:"name"`
FileCode string `json:"filecode"`
Hash string `json:"hash"`
Status int `json:"status"`
} `json:"result"`
}
// DeleteFileResponse represents the response for deleting a file.
type DeleteFileResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
}
// AccountInfoResponse represents the response for account information.
type AccountInfoResponse struct {
Status int `json:"status"` // HTTP status code of the response.
Msg string `json:"msg"` // Message describing the response.
Result struct {
PremiumExpire string `json:"premium_expire"` // Expiration date of premium access.
Email string `json:"email"` // User's email address.
UType string `json:"utype"` // User type (e.g., premium or free).
Storage string `json:"storage"` // Total storage available to the user.
StorageUsed string `json:"storage_used"` // Amount of storage used.
} `json:"result"` // Nested result structure containing account details.
}
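For illustration, a minimal sketch of decoding one of these responses; the payload is a made-up sample that only exercises the JSON tags declared above, not output captured from the FileLu API.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/filelu/api"
)

func main() {
	sample := []byte(`{
		"status": 200,
		"msg": "OK",
		"result": {
			"files": [{"name": "notes.txt", "fld_id": 0, "path": "/notes.txt", "file_code": "abc123", "size": 42}],
			"folders": [{"name": "photos", "fld_id": 7, "path": "/photos"}]
		}
	}`)
	var resp api.FolderListResponse
	if err := json.Unmarshal(sample, &resp); err != nil {
		panic(err)
	}
	for _, folder := range resp.Result.Folders {
		fmt.Println("folder:", folder.Path)
	}
	for _, file := range resp.Result.Files {
		fmt.Println("file:", file.Name, file.Size)
	}
}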

366
backend/filelu/filelu.go Normal file
View File

@@ -0,0 +1,366 @@
// Package filelu provides an interface to the FileLu storage system.
package filelu
import (
"context"
"fmt"
"io"
"net/http"
"os"
"path"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
// Register the backend with Rclone
func init() {
fs.Register(&fs.RegInfo{
Name: "filelu",
Description: "FileLu Cloud Storage",
NewFs: NewFs,
Options: []fs.Option{{
Name: "key",
Help: "Your FileLu Rclone key from My Account",
Required: true,
Sensitive: true,
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
encoder.EncodeSlash |
encoder.EncodeLtGt |
encoder.EncodeExclamation |
encoder.EncodeDoubleQuote |
encoder.EncodeSingleQuote |
encoder.EncodeBackQuote |
encoder.EncodeQuestion |
encoder.EncodeDollar |
encoder.EncodeColon |
encoder.EncodeAsterisk |
encoder.EncodePipe |
encoder.EncodeHash |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeCrLf |
encoder.EncodeDel |
encoder.EncodeCtl |
encoder.EncodeLeftSpace |
encoder.EncodeLeftPeriod |
encoder.EncodeLeftTilde |
encoder.EncodeLeftCrLfHtVt |
encoder.EncodeRightPeriod |
encoder.EncodeRightCrLfHtVt |
encoder.EncodeSquareBracket |
encoder.EncodeSemicolon |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
},
}})
}
// Options defines the configuration for the FileLu backend
type Options struct {
Key string `config:"key"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents the FileLu file system
type Fs struct {
name string
root string
opt Options
features *fs.Features
endpoint string
pacer *pacer.Pacer
srv *rest.Client
client *http.Client
targetFile string
}
// NewFs creates a new Fs object for FileLu
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, fmt.Errorf("failed to parse config: %w", err)
}
if opt.Key == "" {
return nil, fmt.Errorf("FileLu Rclone Key is required")
}
client := fshttp.NewClient(ctx)
if strings.TrimSpace(root) == "" {
root = ""
}
root = strings.Trim(root, "/")
filename := ""
f := &Fs{
name: name,
opt: *opt,
endpoint: "https://filelu.com/rclone",
client: client,
srv: rest.NewClient(client).SetRoot("https://filelu.com/rclone"),
pacer: pacer.New(),
targetFile: filename,
root: root,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
WriteMetadata: false,
SlowHash: true,
}).Fill(ctx, f)
rootContainer, rootDirectory := rootSplit(f.root)
if rootContainer != "" && rootDirectory != "" {
// Check to see if the (container,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)
f.root = strings.Trim(newRoot, "/")
_, err := f.NewObject(ctx, leaf)
if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f
f.root = strings.Trim(oldRoot, "/")
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// Mkdir creates a directory on the remote server.
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
fullPath := path.Clean(f.root + "/" + dir)
_, err := f.createFolder(ctx, fullPath)
return err
}
// About provides usage statistics for the remote
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
accountInfo, err := f.getAccountInfo(ctx)
if err != nil {
return nil, err
}
totalStorage, err := parseStorageToBytes(accountInfo.Result.Storage)
if err != nil {
return nil, fmt.Errorf("failed to parse total storage: %w", err)
}
usedStorage, err := parseStorageToBytes(accountInfo.Result.StorageUsed)
if err != nil {
return nil, fmt.Errorf("failed to parse used storage: %w", err)
}
return &fs.Usage{
Total: fs.NewUsageValue(totalStorage), // Total bytes available
Used: fs.NewUsageValue(usedStorage), // Total bytes used
Free: fs.NewUsageValue(totalStorage - usedStorage),
}, nil
}
// Purge deletes the directory and all its contents
func (f *Fs) Purge(ctx context.Context, dir string) error {
fullPath := path.Join(f.root, dir)
if fullPath != "" {
fullPath = "/" + strings.Trim(fullPath, "/")
}
return f.deleteFolder(ctx, fullPath)
}
// List returns a list of files and folders for the given directory
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
// Compose full path for API call
fullPath := path.Join(f.root, dir)
fullPath = "/" + strings.Trim(fullPath, "/")
if fullPath == "/" {
fullPath = ""
}
var entries fs.DirEntries
result, err := f.getFolderList(ctx, fullPath)
if err != nil {
return nil, err
}
fldMap := map[string]bool{}
for _, folder := range result.Result.Folders {
fldMap[folder.FldID.String()] = true
if f.root == "" && dir == "" && strings.Contains(folder.Path, "/") {
continue
}
paths := strings.Split(folder.Path, fullPath+"/")
remote := paths[0]
if len(paths) > 1 {
remote = paths[1]
}
if strings.Contains(remote, "/") {
continue
}
pathsWithoutRoot := strings.Split(folder.Path, "/"+f.root+"/")
remotePathWithoutRoot := pathsWithoutRoot[0]
if len(pathsWithoutRoot) > 1 {
remotePathWithoutRoot = pathsWithoutRoot[1]
}
remotePathWithoutRoot = strings.TrimPrefix(remotePathWithoutRoot, "/")
entries = append(entries, fs.NewDir(remotePathWithoutRoot, time.Now()))
}
for _, file := range result.Result.Files {
if _, ok := fldMap[file.FldID.String()]; ok {
continue
}
remote := path.Join(dir, file.Name)
// trim leading slashes
remote = strings.TrimPrefix(remote, "/")
obj := &Object{
fs: f,
remote: remote,
size: file.Size,
modTime: time.Now(),
}
entries = append(entries, obj)
}
return entries, nil
}
// Put uploads a file directly to the destination folder in the FileLu storage system.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
err := f.uploadFile(ctx, in, src.Remote())
if err != nil {
return nil, err
}
newObject := &Object{
fs: f,
remote: src.Remote(),
size: src.Size(),
modTime: src.ModTime(ctx),
}
fs.Infof(f, "Put: Successfully uploaded new file %q", src.Remote())
return newObject, nil
}
// Move moves the file to the specified location
func (f *Fs) Move(ctx context.Context, src fs.Object, destinationPath string) (fs.Object, error) {
if strings.HasPrefix(destinationPath, "/") || strings.Contains(destinationPath, ":\\") {
dir := path.Dir(destinationPath)
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, fmt.Errorf("failed to create destination directory: %w", err)
}
reader, err := src.Open(ctx)
if err != nil {
return nil, fmt.Errorf("failed to open source file: %w", err)
}
defer func() {
if err := reader.Close(); err != nil {
fs.Logf(nil, "Failed to close file body: %v", err)
}
}()
dest, err := os.Create(destinationPath)
if err != nil {
return nil, fmt.Errorf("failed to create destination file: %w", err)
}
defer func() {
if err := dest.Close(); err != nil {
fs.Logf(nil, "Failed to close file body: %v", err)
}
}()
if _, err := io.Copy(dest, reader); err != nil {
return nil, fmt.Errorf("failed to copy file content: %w", err)
}
if err := src.Remove(ctx); err != nil {
return nil, fmt.Errorf("failed to remove source file: %w", err)
}
return nil, nil
}
reader, err := src.Open(ctx)
if err != nil {
return nil, fmt.Errorf("failed to open source object: %w", err)
}
defer func() {
if err := reader.Close(); err != nil {
fs.Logf(nil, "Failed to close file body: %v", err)
}
}()
err = f.uploadFile(ctx, reader, destinationPath)
if err != nil {
return nil, fmt.Errorf("failed to upload file to destination: %w", err)
}
if err := src.Remove(ctx); err != nil {
return nil, fmt.Errorf("failed to delete source file: %w", err)
}
return &Object{
fs: f,
remote: destinationPath,
size: src.Size(),
modTime: src.ModTime(ctx),
}, nil
}
// Rmdir removes a directory
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
fullPath := path.Join(f.root, dir)
if fullPath != "" {
fullPath = "/" + strings.Trim(fullPath, "/")
}
// Step 1: Check if folder is empty
listResp, err := f.getFolderList(ctx, fullPath)
if err != nil {
return err
}
if len(listResp.Result.Files) > 0 || len(listResp.Result.Folders) > 0 {
return fmt.Errorf("Rmdir: directory %q is not empty", fullPath)
}
// Step 2: Delete the folder
return f.deleteFolder(ctx, fullPath)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)

View File

@@ -0,0 +1,324 @@
package filelu
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"github.com/rclone/rclone/backend/filelu/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
// createFolder creates a folder at the specified path.
func (f *Fs) createFolder(ctx context.Context, dirPath string) (*api.CreateFolderResponse, error) {
encodedDir := f.fromStandardPath(dirPath)
apiURL := fmt.Sprintf("%s/folder/create?folder_path=%s&key=%s",
f.endpoint,
url.QueryEscape(encodedDir),
url.QueryEscape(f.opt.Key), // assuming f.opt.Key is the correct field
)
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
var resp *http.Response
result := api.CreateFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
var innerErr error
resp, innerErr = f.client.Do(req)
return fserrors.ShouldRetry(innerErr), innerErr
})
if err != nil {
return nil, fmt.Errorf("request failed: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
err = json.NewDecoder(resp.Body).Decode(&result)
if err != nil {
return nil, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 {
return nil, fmt.Errorf("error: %s", result.Msg)
}
fs.Infof(f, "Successfully created folder %q with ID %v", dirPath, result.Result.FldID)
return &result, nil
}
// getFolderList lists both the files and the folders in a directory.
func (f *Fs) getFolderList(ctx context.Context, path string) (*api.FolderListResponse, error) {
encodedDir := f.fromStandardPath(path)
apiURL := fmt.Sprintf("%s/folder/list?folder_path=%s&key=%s",
f.endpoint,
url.QueryEscape(encodedDir),
url.QueryEscape(f.opt.Key),
)
var body []byte
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to list directory: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
body, err = io.ReadAll(resp.Body)
if err != nil {
return false, fmt.Errorf("error reading response body: %w", err)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return nil, err
}
var response api.FolderListResponse
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&response); err != nil {
return nil, fmt.Errorf("error decoding response: %w", err)
}
if response.Status != 200 {
if strings.Contains(response.Msg, "Folder not found") {
return nil, fs.ErrorDirNotFound
}
return nil, fmt.Errorf("API error: %s", response.Msg)
}
for index := range response.Result.Folders {
response.Result.Folders[index].Path = f.toStandardPath(response.Result.Folders[index].Path)
}
for index := range response.Result.Files {
response.Result.Files[index].Name = f.toStandardPath(response.Result.Files[index].Name)
}
return &response, nil
}
// deleteFolder deletes a folder at the specified path.
func (f *Fs) deleteFolder(ctx context.Context, fullPath string) error {
fullPath = f.fromStandardPath(fullPath)
deleteURL := fmt.Sprintf("%s/folder/delete?folder_path=%s&key=%s",
f.endpoint,
url.QueryEscape(fullPath),
url.QueryEscape(f.opt.Key),
)
delResp := api.DeleteFolderResponse{}
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", deleteURL, nil)
if err != nil {
return false, err
}
resp, err := f.client.Do(req)
if err != nil {
return fserrors.ShouldRetry(err), err
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
body, err := io.ReadAll(resp.Body)
if err != nil {
return false, err
}
if err := json.Unmarshal(body, &delResp); err != nil {
return false, fmt.Errorf("error decoding delete response: %w", err)
}
if delResp.Status != 200 {
return false, fmt.Errorf("delete error: %s", delResp.Msg)
}
return false, nil
})
if err != nil {
return err
}
fs.Infof(f, "Rmdir: successfully deleted %q", fullPath)
return nil
}
// getDirectLink fetches the direct download link and size of a file from FileLu.
func (f *Fs) getDirectLink(ctx context.Context, filePath string) (string, int64, error) {
filePath = f.fromStandardPath(filePath)
apiURL := fmt.Sprintf("%s/file/direct_link?file_path=%s&key=%s",
f.endpoint,
url.QueryEscape(filePath),
url.QueryEscape(f.opt.Key),
)
result := api.FileDirectLinkResponse{}
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 {
return false, fmt.Errorf("API error: %s", result.Msg)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return "", 0, err
}
return result.Result.URL, result.Result.Size, nil
}
// deleteFile deletes a file based on filePath
func (f *Fs) deleteFile(ctx context.Context, filePath string) error {
filePath = f.fromStandardPath(filePath)
apiURL := fmt.Sprintf("%s/file/remove?file_path=%s&key=%s",
f.endpoint,
url.QueryEscape(filePath),
url.QueryEscape(f.opt.Key),
)
result := api.DeleteFileResponse{}
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to delete file: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 {
return false, fmt.Errorf("API error: %s", result.Msg)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
return err
}
// getAccountInfo retrieves account information
func (f *Fs) getAccountInfo(ctx context.Context) (*api.AccountInfoResponse, error) {
opts := rest.Opts{
Method: "GET",
Path: "/account/info",
Parameters: url.Values{
"key": {f.opt.Key},
},
}
var result api.AccountInfoResponse
err := f.pacer.Call(func() (bool, error) {
_, callErr := f.srv.CallJSON(ctx, &opts, nil, &result)
return fserrors.ShouldRetry(callErr), callErr
})
if err != nil {
return nil, err
}
if result.Status != 200 {
return nil, fmt.Errorf("error: %s", result.Msg)
}
return &result, nil
}
// getFileInfo retrieves file information based on file code
func (f *Fs) getFileInfo(ctx context.Context, fileCode string) (*api.FileInfoResponse, error) {
u, _ := url.Parse(f.endpoint + "/file/info2")
q := u.Query()
q.Set("file_code", fileCode) // query escaping is handled by q.Encode() below
q.Set("key", f.opt.Key)
u.RawQuery = q.Encode()
apiURL := f.endpoint + "/file/info2?" + u.RawQuery
var body []byte
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to fetch file info: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
body, err = io.ReadAll(resp.Body)
if err != nil {
return false, fmt.Errorf("error reading response body: %w", err)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return nil, err
}
result := api.FileInfoResponse{}
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&result); err != nil {
return nil, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 || len(result.Result) == 0 {
return nil, fs.ErrorObjectNotFound
}
return &result, nil
}


@@ -0,0 +1,193 @@
package filelu
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"net/url"
"path"
"strings"
"github.com/rclone/rclone/fs"
)
// uploadFile uploads a file to FileLu
func (f *Fs) uploadFile(ctx context.Context, fileContent io.Reader, fileFullPath string) error {
directory := path.Dir(fileFullPath)
fileName := path.Base(fileFullPath)
if directory == "." {
directory = ""
}
destinationFolderPath := path.Join(f.root, directory)
if destinationFolderPath != "" {
destinationFolderPath = "/" + strings.Trim(destinationFolderPath, "/")
}
existingEntries, err := f.List(ctx, path.Dir(fileFullPath))
if err != nil {
if errors.Is(err, fs.ErrorDirNotFound) {
err = f.Mkdir(ctx, path.Dir(fileFullPath))
if err != nil {
return fmt.Errorf("failed to create directory: %w", err)
}
} else {
return fmt.Errorf("failed to list existing files: %w", err)
}
}
for _, entry := range existingEntries {
if entry.Remote() == fileFullPath {
_, ok := entry.(fs.Object)
if !ok {
continue
}
// If the file exists but is different, remove it
filePath := "/" + strings.Trim(destinationFolderPath+"/"+fileName, "/")
err = f.deleteFile(ctx, filePath)
if err != nil {
return fmt.Errorf("failed to delete existing file: %w", err)
}
}
}
uploadURL, sessID, err := f.getUploadServer(ctx)
if err != nil {
return fmt.Errorf("failed to retrieve upload server: %w", err)
}
// Since the fileCode isn't used, just handle the error
if _, err := f.uploadFileWithDestination(ctx, uploadURL, sessID, fileName, fileContent, destinationFolderPath); err != nil {
return fmt.Errorf("failed to upload file: %w", err)
}
return nil
}
// getUploadServer gets the upload server URL with proper key authentication
func (f *Fs) getUploadServer(ctx context.Context) (string, string, error) {
apiURL := fmt.Sprintf("%s/upload/server?key=%s", f.endpoint, url.QueryEscape(f.opt.Key))
var result struct {
Status int `json:"status"`
SessID string `json:"sess_id"`
Result string `json:"result"`
Msg string `json:"msg"`
}
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to get upload server: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 {
return false, fmt.Errorf("API error: %s", result.Msg)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return "", "", err
}
return result.Result, result.SessID, nil
}
// uploadFileWithDestination uploads a file directly to a specified folder using file content reader.
func (f *Fs) uploadFileWithDestination(ctx context.Context, uploadURL, sessID, fileName string, fileContent io.Reader, dirPath string) (string, error) {
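// The multipart body is streamed through an io.Pipe: a goroutine writes the form
// fields and the file content to the pipe writer while the HTTP request reads from
// the pipe reader, so the whole file is never buffered in memory.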
destinationPath := f.fromStandardPath(dirPath)
encodedFileName := f.fromStandardPath(fileName)
pr, pw := io.Pipe()
writer := multipart.NewWriter(pw)
isDeletionRequired := false
go func() {
defer func() {
if err := pw.Close(); err != nil {
fs.Logf(nil, "Failed to close: %v", err)
}
}()
_ = writer.WriteField("sess_id", sessID)
_ = writer.WriteField("utype", "prem")
_ = writer.WriteField("fld_path", destinationPath)
part, err := writer.CreateFormFile("file_0", encodedFileName)
if err != nil {
pw.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
return
}
if _, err := io.Copy(part, fileContent); err != nil {
isDeletionRequired = true
pw.CloseWithError(fmt.Errorf("failed to copy file content: %w", err))
return
}
if err := writer.Close(); err != nil {
pw.CloseWithError(fmt.Errorf("failed to close writer: %w", err))
}
}()
var fileCode string
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "POST", uploadURL, pr)
if err != nil {
return false, fmt.Errorf("failed to create upload request: %w", err)
}
req.Header.Set("Content-Type", writer.FormDataContentType())
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to send upload request: %w", err)
}
defer respBodyClose(resp.Body)
var result []struct {
FileCode string `json:"file_code"`
FileStatus string `json:"file_status"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, fmt.Errorf("failed to parse upload response: %w", err)
}
if len(result) == 0 {
return false, fmt.Errorf("upload failed: empty response from upload server")
}
if result[0].FileStatus != "OK" {
return false, fmt.Errorf("upload failed with status: %s", result[0].FileStatus)
}
fileCode = result[0].FileCode
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil && isDeletionRequired {
// Attempt to delete the file if upload fails
_ = f.deleteFile(ctx, destinationPath+"/"+fileName)
}
return fileCode, err
}
// respBodyClose closes the response body, reporting any error from the close.
func respBodyClose(responseBody io.Closer) {
if cerr := responseBody.Close(); cerr != nil {
fmt.Printf("Error closing response body: %v\n", cerr)
}
}


@@ -0,0 +1,112 @@
package filelu
import (
"context"
"errors"
"fmt"
"path"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
)
// errFileNotFound represents a file not found error
var errFileNotFound error = errors.New("file not found")
// getFileCode retrieves the file code for a given file path
func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {
// Prepare parent directory
parentDir := path.Dir(filePath)
// Call List to get all the files
result, err := f.getFolderList(ctx, parentDir)
if err != nil {
return "", err
}
for _, file := range result.Result.Files {
filePathFromServer := parentDir + "/" + file.Name
if parentDir == "/" {
filePathFromServer = "/" + file.Name
}
if filePath == filePathFromServer {
return file.FileCode, nil
}
}
return "", errFileNotFound
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
func (f *Fs) fromStandardPath(remote string) string {
return f.opt.Enc.FromStandardPath(remote)
}
func (f *Fs) toStandardPath(remote string) string {
return f.opt.Enc.ToStandardPath(remote)
}
// Hashes returns an empty hash set, indicating no hash support
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet() // Properly creates an empty hash set
}
// Name returns the remote name
func (f *Fs) Name() string {
return f.name
}
// Root returns the root path
func (f *Fs) Root() string {
return f.root
}
// Precision returns the precision of the remote
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
func (f *Fs) String() string {
return fmt.Sprintf("FileLu root '%s'", f.root)
}
// isFileCode checks if a string looks like a file code
func isFileCode(s string) bool {
if len(s) != 12 {
return false
}
for _, c := range s {
if !((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9')) {
return false
}
}
return true
}
func shouldRetry(err error) bool {
return fserrors.ShouldRetry(err)
}
func shouldRetryHTTP(code int) bool {
return code == 429 || code >= 500
}
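// rootSplit splits an absolute path into its first segment (the bucket) and the
// remainder, e.g. "bucket/some/path" -> ("bucket", "some/path") and "bucket" -> ("bucket", "").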
func rootSplit(absPath string) (bucket, bucketPath string) {
// No bucket
if absPath == "" {
return "", ""
}
slash := strings.IndexRune(absPath, '/')
// Bucket but no path
if slash < 0 {
return absPath, ""
}
return absPath[:slash], absPath[slash+1:]
}


@@ -0,0 +1,259 @@
package filelu
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
)
// Object describes a FileLu object
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
}
// NewObject creates a new Object for the given remote path
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
var filePath string
filePath = path.Join(f.root, remote)
filePath = "/" + strings.Trim(filePath, "/")
// Get File code
fileCode, err := f.getFileCode(ctx, filePath)
if err != nil {
return nil, fs.ErrorObjectNotFound
}
// Get File info
fileInfos, err := f.getFileInfo(ctx, fileCode)
if err != nil {
return nil, fmt.Errorf("failed to get file info: %w", err)
}
fileInfo := fileInfos.Result[0]
size, _ := strconv.ParseInt(fileInfo.Size, 10, 64)
returnedRemote := remote
return &Object{
fs: f,
remote: returnedRemote,
size: size,
modTime: time.Now(),
}, nil
}
// Open opens the object for reading
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
filePath := path.Join(o.fs.root, o.remote)
// Get direct link
directLink, size, err := o.fs.getDirectLink(ctx, filePath)
if err != nil {
return nil, fmt.Errorf("failed to get direct link: %w", err)
}
o.size = size
// Offset and Count for range download
var offset int64
var count int64
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
case *fs.SeekOption:
offset = x.Offset
count = o.size
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
var reader io.ReadCloser
err = o.fs.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", directLink, nil)
if err != nil {
return false, fmt.Errorf("failed to create download request: %w", err)
}
resp, err := o.fs.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to download file: %w", err)
}
if resp.StatusCode != http.StatusOK {
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
return false, fmt.Errorf("failed to download file: HTTP %d", resp.StatusCode)
}
// Wrap the response body to handle offset and count
currentContents, err := io.ReadAll(resp.Body)
if err != nil {
return false, fmt.Errorf("failed to read response body: %w", err)
}
if offset > 0 {
if offset > int64(len(currentContents)) {
return false, fmt.Errorf("offset %d exceeds file size %d", offset, len(currentContents))
}
currentContents = currentContents[offset:]
}
if count > 0 && count < int64(len(currentContents)) {
currentContents = currentContents[:count]
}
reader = io.NopCloser(bytes.NewReader(currentContents))
return false, nil
})
if err != nil {
return nil, err
}
return reader, nil
}
// Update updates the object with new data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if src.Size() <= 0 {
return fs.ErrorCantUploadEmptyFiles
}
err := o.fs.uploadFile(ctx, in, o.remote)
if err != nil {
return fmt.Errorf("failed to upload file: %w", err)
}
o.size = src.Size()
return nil
}
// Remove deletes the object from FileLu
func (o *Object) Remove(ctx context.Context) error {
fullPath := "/" + strings.Trim(path.Join(o.fs.root, o.remote), "/")
err := o.fs.deleteFile(ctx, fullPath)
if err != nil {
return err
}
fs.Infof(o.fs, "Successfully deleted file: %s", fullPath)
return nil
}
// Hash returns the MD5 hash of an object
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
var fileCode string
if isFileCode(o.fs.root) {
fileCode = o.fs.root
} else {
matches := regexp.MustCompile(`\((.*?)\)`).FindAllStringSubmatch(o.remote, -1)
for _, match := range matches {
if len(match) > 1 && len(match[1]) == 12 {
fileCode = match[1]
break
}
}
}
if fileCode == "" {
return "", fmt.Errorf("no valid file code found in the remote path")
}
apiURL := fmt.Sprintf("%s/file/info?file_code=%s&key=%s",
o.fs.endpoint, url.QueryEscape(fileCode), url.QueryEscape(o.fs.opt.Key))
var result struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result []struct {
Hash string `json:"hash"`
} `json:"result"`
}
err := o.fs.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, err
}
resp, err := o.fs.client.Do(req)
if err != nil {
return shouldRetry(err), err
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, err
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return "", err
}
if result.Status != 200 || len(result.Result) == 0 {
return "", fmt.Errorf("error: unable to fetch hash: %s", result.Msg)
}
return result.Result[0].Hash, nil
}
// String returns a string representation of the object
func (o *Object) String() string {
return o.remote
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the object
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Storable indicates whether the object is storable
func (o *Object) Storable() bool {
return true
}


@@ -0,0 +1,16 @@
package filelu_test
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests for the FileLu backend
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFileLu:",
NilObject: nil,
SkipInvalidUTF8: true,
})
}
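Usage note (not part of the diff): once a TestFileLu: remote is configured, this suite is normally run from the backend directory with the standard rclone integration test invocation, for example go test -v -remote TestFileLu: (assuming the usual fstests conventions).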

backend/filelu/utils.go

@@ -0,0 +1,15 @@
package filelu
import (
"fmt"
)
// parseStorageToBytes converts a storage string (e.g., "10") to bytes
func parseStorageToBytes(storage string) (int64, error) {
var gb float64
_, err := fmt.Sscanf(storage, "%f", &gb)
if err != nil {
return 0, fmt.Errorf("failed to parse storage: %w", err)
}
return int64(gb * 1024 * 1024 * 1024), nil
}
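Illustrative only (a minimal sketch, not part of the commit; it assumes an example file inside the package with fmt imported): the helper turns a gigabyte figure reported by the API into 1024-based bytes, so "10" becomes 10 GiB.

func ExampleParseStorageToBytes() {
    n, err := parseStorageToBytes("10") // 10 GiB reported by the account info API
    if err != nil {
        panic(err)
    }
    fmt.Println(n) // 10 * 1024 * 1024 * 1024
    // Output: 10737418240
}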


@@ -9,6 +9,7 @@ import (
"io"
"net"
"net/textproto"
"net/url"
"path"
"runtime"
"strings"
@@ -185,6 +186,14 @@ Supports the format user:pass@host:port, user@host:port, host:port.
Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: "http_proxy",
Default: "",
Help: `URL for HTTP CONNECT proxy
Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
`,
Advanced: true,
}, {
@@ -248,6 +257,7 @@ type Options struct {
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
HTTPProxy string `config:"http_proxy"`
NoCheckUpload bool `config:"no_check_upload"`
}
@@ -266,6 +276,7 @@ type Fs struct {
pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser
proxyURL *url.URL // address of HTTP proxy read from environment
pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime
@@ -413,11 +424,26 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
dial := func(network, address string) (conn net.Conn, err error) {
fs.Debugf(f, "dial(%q,%q)", network, address)
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
if err != nil {
fs.Debugf(f, "> dial: conn=%v, err=%v", conn, err)
} else {
fs.Debugf(f, "> dial: conn=%s->%s, err=%v", conn.LocalAddr(), conn.RemoteAddr(), err)
}
}()
baseDialer := fshttp.NewDialer(ctx)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
} else if f.proxyURL != nil {
// We need to make the onward connection to f.opt.Host. However the FTP
// library sets the host to the proxy IP after using EPSV or PASV so we need
// to correct that here.
var dialPort string
_, dialPort, err = net.SplitHostPort(address)
if err != nil {
return nil, err
}
dialAddress := net.JoinHostPort(f.opt.Host, dialPort)
conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
} else {
conn, err = baseDialer.Dial(network, address)
}
@@ -631,6 +657,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
CanHaveEmptyDirectories: true,
PartialUploads: true,
}).Fill(ctx, f)
// get proxy URL if set
if opt.HTTPProxy != "" {
proxyURL, err := url.Parse(opt.HTTPProxy)
if err != nil {
return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
}
f.proxyURL = proxyURL
}
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
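For context, an illustrative sketch rather than part of the diff (the remote name, host and proxy URL are placeholders): the new http_proxy option is set on an ftp remote like any other backend option, e.g.

[myftp]
type = ftp
host = ftp.example.com
http_proxy = http://proxy.example.com:3128

or on the command line as --ftp-http-proxy http://proxy.example.com:3128, following rclone's usual backend flag naming.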


@@ -194,33 +194,9 @@ type DeleteResponse struct {
Data map[string]Error
}
// Server is an upload server
type Server struct {
Name string `json:"name"`
Zone string `json:"zone"`
}
// String returns a string representation of the Server
func (s *Server) String() string {
return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}
// Root returns the root URL for the server
func (s *Server) Root() string {
return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}
// URL returns the upload URL for the server
func (s *Server) URL() string {
return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}
// ServersResponse is the output from /servers
type ServersResponse struct {
Error
Data struct {
Servers []Server `json:"servers"`
} `json:"data"`
// DirectUploadURL returns the direct upload URL for Gofile
func DirectUploadURL() string {
return "https://upload.gofile.io/uploadfile"
}
// UploadResponse is returned by POST /contents/uploadfile


@@ -8,13 +8,11 @@ import (
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/rclone/rclone/backend/gofile/api"
@@ -25,7 +23,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
@@ -37,10 +35,8 @@ const (
maxSleep = 20 * time.Second
decayConstant = 1 // bigger for slower decay, exponential
rootURL = "https://api.gofile.io"
serversExpiry = 60 * time.Second // check for new upload servers this often
serversActive = 2 // choose this many closest upload servers to use
rateLimitSleep = 5 * time.Second // penalise a goroutine by this long for making a rate limit error
maxDepth = 4 // in ListR recursive list this deep (maximum is 16)
rateLimitSleep = 5 * time.Second // penalise a goroutine by this long for making a rate limit error
maxDepth = 4 // in ListR recursive list this deep (maximum is 16)
)
/*
@@ -128,16 +124,13 @@ type Options struct {
// Fs represents a remote gofile
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
serversMu *sync.Mutex // protect the servers info below
servers []api.Server // upload servers we can use
serversChecked time.Time // time the servers were refreshed
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
}
// Object describes a gofile object
@@ -311,12 +304,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
client := fshttp.NewClient(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
serversMu: new(sync.Mutex),
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: false,
@@ -435,98 +427,6 @@ func (f *Fs) readRootFolderID(ctx context.Context, m configmap.Mapper) (err erro
return nil
}
// Find the top n servers measured by response time
func (f *Fs) bestServers(ctx context.Context, servers []api.Server, n int) (newServers []api.Server) {
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(10*time.Second))
defer cancel()
if n > len(servers) {
n = len(servers)
}
results := make(chan int, len(servers))
// Test how long the servers take to respond
for i := range servers {
i := i // for closure
go func() {
opts := rest.Opts{
Method: "GET",
RootURL: servers[i].Root(),
}
var result api.UploadServerStatus
start := time.Now()
_, err := f.srv.CallJSON(ctx, &opts, nil, &result)
ping := time.Since(start)
err = result.Err(err)
if err != nil {
results <- -1 // send a -ve number on error
return
}
fs.Debugf(nil, "Upload server %v responded in %v", &servers[i], ping)
results <- i
}()
}
// Wait for n servers to respond
newServers = make([]api.Server, 0, n)
for range servers {
i := <-results
if i >= 0 {
newServers = append(newServers, servers[i])
}
if len(newServers) >= n {
break
}
}
return newServers
}
// Clear all the upload servers - call on an error
func (f *Fs) clearServers() {
f.serversMu.Lock()
defer f.serversMu.Unlock()
fs.Debugf(f, "Clearing upload servers")
f.servers = nil
}
// Gets an upload server
func (f *Fs) getServer(ctx context.Context) (server *api.Server, err error) {
f.serversMu.Lock()
defer f.serversMu.Unlock()
if len(f.servers) == 0 || time.Since(f.serversChecked) >= serversExpiry {
opts := rest.Opts{
Method: "GET",
Path: "/servers",
}
var result api.ServersResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err = result.Err(err); err != nil {
if len(f.servers) == 0 {
return nil, fmt.Errorf("failed to read upload servers: %w", err)
}
fs.Errorf(f, "failed to read new upload servers: %v", err)
} else {
// Find the top servers measured by response time
f.servers = f.bestServers(ctx, result.Data.Servers, serversActive)
f.serversChecked = time.Now()
}
}
if len(f.servers) == 0 {
return nil, errors.New("no upload servers found")
}
// Pick a server at random since we've already found the top ones
i := rand.Intn(len(f.servers))
return &f.servers[i], nil
}
// rootSlash returns root with a slash on if it is empty, otherwise empty string
func (f *Fs) rootSlash() string {
if f.root == "" {
@@ -734,7 +634,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
// implementation of ListR
func (f *Fs) listR(ctx context.Context, dir string, list *walk.ListRHelper) (err error) {
func (f *Fs) listR(ctx context.Context, dir string, list *list.Helper) (err error) {
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -820,7 +720,7 @@ func (f *Fs) listR(ctx context.Context, dir string, list *walk.ListRHelper) (err
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
err = f.listR(ctx, dir, list)
if err != nil {
return err
@@ -1526,13 +1426,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// Find an upload server
server, err := o.fs.getServer(ctx)
if err != nil {
return err
}
fs.Debugf(o, "Using upload server %v", server)
// If the file exists, delete it after a successful upload
if o.id != "" {
id := o.id
@@ -1561,7 +1454,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
},
MultipartContentName: "file",
MultipartFileName: o.fs.opt.Enc.FromStandardName(leaf),
RootURL: server.URL(),
RootURL: api.DirectUploadURL(),
Options: options,
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1569,10 +1462,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err = result.Err(err); err != nil {
if isAPIErr(err, "error-freespace") {
fs.Errorf(o, "Upload server out of space - need to retry upload")
}
o.fs.clearServers()
return fmt.Errorf("failed to upload file: %w", err)
}
return o.setMetaData(&result.Data)


@@ -35,7 +35,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
@@ -483,6 +483,9 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
if f.opt.DirectoryMarkers && strings.HasSuffix(bucketPath, "//") {
bucketPath = bucketPath[:len(bucketPath)-1]
}
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
@@ -712,7 +715,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
remote, _ = strings.CutSuffix(remote, "/")
}
remote = remote[len(prefix):]
if addBucket {
@@ -845,7 +848,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -959,7 +962,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
remote, _ = strings.CutSuffix(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""


@@ -43,6 +43,7 @@ var (
errAlbumDelete = errors.New("google photos API does not implement deleting albums")
errRemove = errors.New("google photos API only implements removing files from albums")
errOwnAlbums = errors.New("google photos API only allows uploading to albums rclone created")
errReadOnly = errors.New("can't upload files in read only mode")
)
const (
@@ -52,19 +53,31 @@ const (
listChunks = 100 // chunk size to read directory listings
albumChunks = 50 // chunk size to read album listings
minSleep = 10 * time.Millisecond
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
scopeAccess = 2 // position of access scope in list
scopeAppendOnly = "https://www.googleapis.com/auth/photoslibrary.appendonly"
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly.appcreateddata"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary.edit.appcreateddata"
)
var (
// scopes needed for read write access
scopesReadWrite = []string{
"openid",
"profile",
scopeAppendOnly,
scopeReadOnly,
scopeReadWrite,
}
// scopes needed for read only access
scopesReadOnly = []string{
"openid",
"profile",
scopeReadOnly,
}
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: []string{
"openid",
"profile",
scopeReadWrite, // this must be at position scopeAccess
},
Scopes: scopesReadWrite,
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
ClientID: rcloneClientID,
@@ -100,9 +113,9 @@ func init() {
case "":
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes[scopeAccess] = scopeReadOnly
oauthConfig.Scopes = scopesReadOnly
} else {
oauthConfig.Scopes[scopeAccess] = scopeReadWrite
oauthConfig.Scopes = scopesReadWrite
}
return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig,
@@ -167,7 +180,7 @@ listings and won't be transferred.`,
The Google API will deliver images and video which aren't full
resolution, and/or have EXIF data missing.
However if you ue the gphotosdl proxy tnen you can download original,
However if you use the gphotosdl proxy then you can download original,
unchanged images.
This runs a headless browser in the background.
@@ -333,7 +346,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
baseClient := fshttp.NewClient(ctx)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, fmt.Errorf("failed to configure Box: %w", err)
return nil, fmt.Errorf("failed to configure google photos: %w", err)
}
root = strings.Trim(path.Clean(root), "/")
@@ -1120,6 +1133,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
if !album.IsWriteable {
if o.fs.opt.ReadOnly {
return errReadOnly
}
return errOwnAlbums
}


@@ -18,6 +18,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/kv"
)
@@ -182,6 +183,9 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
// Enable ListP always
f.features.ListP = f.ListP
cache.PinUntilFinalized(f.Fs, f)
return f, err
}
@@ -237,10 +241,39 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if entries, err = f.Fs.List(ctx, dir); err != nil {
return nil, err
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.wrapEntries(entries)
if err != nil {
return err
}
return callback(entries)
}
return f.wrapEntries(entries)
listP := f.Fs.Features().ListP
if listP == nil {
entries, err := f.Fs.List(ctx, dir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
}
// ListR lists the objects and directories recursively into out.
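Illustrative only (not part of the commit): a caller of the new ListP passes a callback which receives the entries in tranches, roughly like this:

err := f.ListP(ctx, "some/dir", func(entries fs.DirEntries) error {
    for _, entry := range entries {
        fmt.Println(entry.Remote()) // entries arrive already wrapped by wrapEntries
    }
    return nil
})
if err != nil {
    // a missing directory surfaces as fs.ErrorDirNotFound
}

Because List now delegates to list.WithListP, the paginated and the plain listing paths share the same wrapping logic.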


@@ -180,7 +180,6 @@ func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Op
}
addHeaders(req, opt)
res, err := noRedir.Do(req)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
return createFileResult()
@@ -249,6 +248,14 @@ func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err
f.httpClient = client
f.endpoint = u
f.endpointURL = u.String()
if isFile {
// Correct root if definitely pointing to a file
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
return isFile, nil
}


@@ -252,18 +252,14 @@ func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.Op
}
resp, err := d.icloud.srv.Call(ctx, opts)
if err != nil {
// icloud has some weird http codes
if resp.StatusCode == 330 {
loc, err := resp.Location()
if err == nil {
return d.DownloadFile(ctx, loc.String(), opt)
}
// icloud has some weird http codes
if err != nil && resp != nil && resp.StatusCode == 330 {
loc, err := resp.Location()
if err == nil {
return d.DownloadFile(ctx, loc.String(), opt)
}
return resp, err
}
return d.icloud.srv.Call(ctx, opts)
return resp, err
}
// MoveItemToTrashByItemID moves an item to the trash based on the item ID.
@@ -631,7 +627,7 @@ func NewUpdateFileInfo() UpdateFileInfo {
FileFlags: FileFlags{
IsExecutable: true,
IsHidden: false,
IsWritable: false,
IsWritable: true,
},
}
}


@@ -31,7 +31,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
@@ -1264,7 +1264,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
Parameters: url.Values{},
}
opts.Parameters.Set("mode", "liststream")
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {


@@ -1090,6 +1090,10 @@ func (o *Object) Remote() string {
// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
if r == hash.None {
return "", nil
}
// Check that the underlying file hasn't changed
o.fs.objectMetaMu.RLock()
oldtime := o.modTime


@@ -204,6 +204,23 @@ func TestSymlinkError(t *testing.T) {
assert.Equal(t, errLinksAndCopyLinks, err)
}
func TestHashWithTypeNone(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
const filePath = "file.txt"
r.WriteFile(filePath, "content", time.Now())
f := r.Flocal.(*Fs)
// Get the object
o, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
// Test the hash is as we expect
h, err := o.Hash(ctx, hash.None)
require.Empty(t, h)
require.NoError(t, err)
}
// Test hashes on updating an object
func TestHashOnUpdate(t *testing.T) {
ctx := context.Background()


@@ -17,7 +17,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/bucket"
)
@@ -383,7 +383,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
entries := fs.DirEntries{}
listR := func(bucket, directory, prefix string, addBucket bool) error {
err = f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, entry fs.DirEntry, isDirectory bool) error {


@@ -28,7 +28,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
@@ -516,7 +516,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return fs.ErrorDirNotFound
}
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
for resumeStart := u.Path; resumeStart != ""; {
var files []File
files, resumeStart, err = f.netStorageListRequest(ctx, URL, u.Path)


@@ -396,10 +396,57 @@ func (m *Metadata) WritePermissions(ctx context.Context) (err error) {
return nil
}
// Order the permissions so that any with users come first.
//
// This is to work around a quirk with Graph:
//
// 1. You are adding permissions for both a group and a user.
// 2. The user is a member of the group.
// 3. The permissions for the group and user are the same.
// 4. You are adding the group permission before the user permission.
//
// When all of the above are true, Graph indicates it has added the
// user permission, but it immediately drops it
//
// See: https://github.com/rclone/rclone/issues/8465
func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
// Return true if identity has any user permissions
hasUserIdentity := func(identity *api.IdentitySet) bool {
if identity == nil {
return false
}
return identity.User.ID != "" || identity.User.DisplayName != "" || identity.User.Email != "" || identity.User.LoginName != ""
}
// Return true if p has any user permissions
hasUser := func(p *api.PermissionsType) bool {
if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
return true
}
for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
if hasUserIdentity(identity) {
return true
}
}
return false
}
// Put Permissions with a user first, leaving unsorted otherwise
slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {
aHasUser := hasUser(a)
bHasUser := hasUser(b)
if aHasUser && !bHasUser {
return -1
} else if !aHasUser && bHasUser {
return 1
}
return 0
})
}
// sortPermissions sorts the permissions (to be written) into add, update, and remove queues
func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType) {
new, old := m.queuedPermissions, m.permissions
if len(old) == 0 || m.permsAddOnly {
m.orderPermissions(new)
return new, nil, nil // they must all be "add"
}
@@ -447,6 +494,9 @@ func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType
remove = append(remove, o)
}
}
m.orderPermissions(add)
m.orderPermissions(update)
m.orderPermissions(remove)
return add, update, remove
}
@@ -699,6 +749,8 @@ func (o *Object) fetchMetadataForCreate(ctx context.Context, src fs.ObjectInfo,
// Fetch metadata and update updateInfo if --metadata is in use
// modtime will still be set when there is no metadata to set
//
// May return info=nil and err=nil if there was no metadata to update.
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *Object) (info *api.Item, err error) {
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
if err != nil {
@@ -718,6 +770,8 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
}
// updateMetadata calls Get, Set, and Write
//
// May return info=nil and err=nil if there was no metadata to update.
func (o *Object) updateMetadata(ctx context.Context, meta fs.Metadata) (info *api.Item, err error) {
_, err = o.meta.Get(ctx) // refresh permissions
if err != nil {


@@ -0,0 +1,125 @@
package onedrive
import (
"encoding/json"
"testing"
"github.com/rclone/rclone/backend/onedrive/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestOrderPermissions(t *testing.T) {
tests := []struct {
name string
input []*api.PermissionsType
expected []string
}{
{
name: "empty",
input: []*api.PermissionsType{},
expected: []string(nil),
},
{
name: "users first, then group, then none",
input: []*api.PermissionsType{
{ID: "1", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group1"}}},
{ID: "2", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{DisplayName: "Alice"}}}},
{ID: "3", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Alice"}}},
{ID: "4"},
},
expected: []string{"2", "3", "1", "4"},
},
{
name: "same type unsorted",
input: []*api.PermissionsType{
{ID: "b", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group B"}}},
{ID: "a", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group A"}}},
{ID: "c", GrantedToIdentities: []*api.IdentitySet{{Group: api.Identity{DisplayName: "Group A"}}, {User: api.Identity{DisplayName: "Alice"}}}},
},
expected: []string{"c", "b", "a"},
},
{
name: "all user identities",
input: []*api.PermissionsType{
{ID: "c", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Bob"}}},
{ID: "a", GrantedTo: &api.IdentitySet{User: api.Identity{Email: "alice@example.com"}}},
{ID: "b", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{LoginName: "user3"}}}},
},
expected: []string{"c", "a", "b"},
},
{
name: "no user or group info",
input: []*api.PermissionsType{
{ID: "z"},
{ID: "x"},
{ID: "y"},
},
expected: []string{"z", "x", "y"},
},
}
for _, driveType := range []string{driveTypePersonal, driveTypeBusiness} {
t.Run(driveType, func(t *testing.T) {
for _, tt := range tests {
m := &Metadata{fs: &Fs{driveType: driveType}}
t.Run(tt.name, func(t *testing.T) {
if driveType == driveTypeBusiness {
for i := range tt.input {
tt.input[i].GrantedToV2 = tt.input[i].GrantedTo
tt.input[i].GrantedTo = nil
tt.input[i].GrantedToIdentitiesV2 = tt.input[i].GrantedToIdentities
tt.input[i].GrantedToIdentities = nil
}
}
m.orderPermissions(tt.input)
var gotIDs []string
for _, p := range tt.input {
gotIDs = append(gotIDs, p.ID)
}
assert.Equal(t, tt.expected, gotIDs)
})
}
})
}
}
func TestOrderPermissionsJSON(t *testing.T) {
testJSON := `[
{
"id": "1",
"grantedToV2": {
"group": {
"id": "group@example.com"
}
},
"roles": [
"write"
]
},
{
"id": "2",
"grantedToV2": {
"user": {
"id": "user@example.com"
}
},
"roles": [
"write"
]
}
]`
var testPerms []*api.PermissionsType
err := json.Unmarshal([]byte(testJSON), &testPerms)
require.NoError(t, err)
m := &Metadata{fs: &Fs{driveType: driveTypeBusiness}}
m.orderPermissions(testPerms)
var gotIDs []string
for _, p := range testPerms {
gotIDs = append(gotIDs, p.ID)
}
assert.Equal(t, []string{"2", "1"}, gotIDs)
}


@@ -30,6 +30,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
@@ -55,6 +56,7 @@ const (
driveTypeSharepoint = "documentLibrary"
defaultChunkSize = 10 * fs.Mebi
chunkSizeMultiple = 320 * fs.Kibi
maxSinglePartSize = 4 * fs.Mebi
regionGlobal = "global"
regionUS = "us"
@@ -137,6 +139,21 @@ func init() {
Help: "Azure and Office 365 operated by Vnet Group in China",
},
},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.
Any files larger than this will be uploaded in chunks of chunk_size.
This is disabled by default as uploading using single part uploads
causes rclone to use twice the storage on Onedrive business as when
rclone sets the modification time after the upload Onedrive creates a
new version.
See: https://github.com/rclone/rclone/issues/1716
`,
Default: fs.SizeSuffix(-1),
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
@@ -745,6 +762,7 @@ Examples:
// Options defines the configuration for this backend
type Options struct {
Region string `config:"region"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
@@ -1021,6 +1039,13 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxSinglePartSize {
return fmt.Errorf("%v is greater than %v", cs, maxSinglePartSize)
}
return nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -1034,6 +1059,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("onedrive: chunk size: %w", err)
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, fmt.Errorf("onedrive: upload cutoff: %w", err)
}
if opt.DriveID == "" || opt.DriveType == "" {
return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
@@ -1396,7 +1425,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// So we have to filter things outside of the root which is
// inefficient.
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
// list a folder conventionally - used for shared folders
var listFolder func(dir string) error
@@ -1753,7 +1782,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
if err != nil {
return nil, err
}
err = dstObj.setMetaData(info)
if info != nil {
err = dstObj.setMetaData(info)
}
return dstObj, err
}
@@ -1833,7 +1864,9 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
err = dstObj.setMetaData(info)
if info != nil {
err = dstObj.setMetaData(info)
}
return dstObj, err
}
@@ -2468,6 +2501,10 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
return false, nil
}
return true, fmt.Errorf("retry this chunk skipping %d bytes: %w", skip, err)
} else if err != nil && resp != nil && resp.StatusCode == http.StatusNotFound {
fs.Debugf(o, "Received 404 error: assuming eventual consistency problem with session - retrying chunk: %v", err)
time.Sleep(5 * time.Second) // a little delay to help things along
return true, err
}
if err != nil {
return shouldRetry(ctx, resp, err)
@@ -2562,8 +2599,8 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
// This function will set modtime and metadata after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
size := src.Size()
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
if size < 0 || size > int64(maxSinglePartSize) {
return nil, fmt.Errorf("size passed into uploadSinglepart must be >= 0 and <= %v", maxSinglePartSize)
}
fs.Debugf(o, "Starting singlepart upload")
@@ -2596,7 +2633,10 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.Obje
if err != nil {
return nil, fmt.Errorf("failed to fetch and update metadata: %w", err)
}
return info, o.setMetaData(info)
if info != nil {
err = o.setMetaData(info)
}
return info, err
}
// Update the object with the contents of the io.Reader, modTime and size
@@ -2616,9 +2656,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
var info *api.Item
if size > 0 {
if size > 0 && size >= int64(o.fs.opt.UploadCutoff) {
info, err = o.uploadMultipart(ctx, in, src, options...)
} else if size == 0 {
} else if size >= 0 {
info, err = o.uploadSinglepart(ctx, in, src, options...)
} else {
return errors.New("unknown-sized upload not supported")
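Illustrative usage, not part of the diff (the remote name and paths are placeholders): files at or above upload_cutoff go through the chunked upload session, smaller ones take the new single part path, and checkUploadCutoff above rejects values larger than the 4 MiB maxSinglePartSize. A user who wants single part uploads for small files might therefore run something like

rclone copy ./small.txt onedrive:backup --onedrive-upload-cutoff 4M

with the flag name following rclone's usual backend option naming.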


@@ -18,8 +18,8 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
)
@@ -649,7 +649,7 @@ of listing recursively that doing a directory traversal.
*/
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucketName, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)


@@ -27,7 +27,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
@@ -378,12 +378,20 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f, nil
}
// OpenWriterAt opens with a handle for random access writes
// XOpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
// It truncates any existing object.
//
// OpenWriterAt disabled because it seems to have been disabled at pcloud
// PUT /file_open?flags=XXX&folderid=XXX&name=XXX HTTP/1.1
//
// {
// "result": 2003,
// "error": "Access denied. You do not have permissions to perform this operation."
// }
func (f *Fs) XOpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
client, err := f.newSingleConnClient(ctx)
if err != nil {
return nil, fmt.Errorf("create client: %w", err)
@@ -631,7 +639,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
err = f.listHelper(ctx, dir, true, func(o fs.DirEntry) error {
return list.Add(o)
})

View File

@@ -22,7 +22,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
qsConfig "github.com/yunify/qingstor-sdk-go/v3/config"
@@ -704,7 +704,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)

View File

@@ -48,8 +48,8 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
@@ -101,6 +101,12 @@ var providerOption = fs.Option{
}, {
Value: "Dreamhost",
Help: "Dreamhost DreamObjects",
}, {
Value: "Exaba",
Help: "Exaba Object Storage",
}, {
Value: "FlashBlade",
Help: "Pure Storage FlashBlade Object Storage",
}, {
Value: "GCS",
Help: "Google Cloud Storage",
@@ -131,6 +137,9 @@ var providerOption = fs.Option{
}, {
Value: "Magalu",
Help: "Magalu Object Storage",
}, {
Value: "Mega",
Help: "MEGA S4 Object Storage",
}, {
Value: "Minio",
Help: "Minio Object Storage",
@@ -567,7 +576,7 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -1003,6 +1012,12 @@ func init() {
Value: "us-iad-1.linodeobjects.com",
Help: "Washington, DC, (USA), us-iad-1",
}},
}, {
// Lyve Cloud endpoints
Name: "endpoint",
Help: "Endpoint for Lyve Cloud S3 API.\nRequired when using an S3 clone. Please type in your LyveCloud endpoint.\nExamples:\n- s3.us-west-1.{account_name}.lyve.seagate.com (US West 1 - California)\n- s3.eu-west-1.{account_name}.lyve.seagate.com (EU West 1 - Ireland)",
Provider: "LyveCloud",
Required: true,
}, {
// Magalu endpoints: https://docs.magalu.cloud/docs/object-storage/how-to/copy-url
Name: "endpoint",
@@ -1377,7 +1392,7 @@ func init() {
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@@ -1426,18 +1441,6 @@ func init() {
Value: "localhost:8333",
Help: "SeaweedFS S3 localhost",
Provider: "SeaweedFS",
}, {
Value: "s3.us-east-1.lyvecloud.seagate.com",
Help: "Seagate Lyve Cloud US East 1 (Virginia)",
Provider: "LyveCloud",
}, {
Value: "s3.us-west-1.lyvecloud.seagate.com",
Help: "Seagate Lyve Cloud US West 1 (California)",
Provider: "LyveCloud",
}, {
Value: "s3.ap-southeast-1.lyvecloud.seagate.com",
Help: "Seagate Lyve Cloud AP Southeast 1 (Singapore)",
Provider: "LyveCloud",
}, {
Value: "oos.eu-west-2.outscale.com",
Help: "Outscale EU West 2 (Paris)",
@@ -1526,6 +1529,22 @@ func init() {
Value: "s3.ir-tbz-sh1.arvanstorage.ir",
Help: "ArvanCloud Tabriz Iran (Shahriar) endpoint",
Provider: "ArvanCloud",
}, {
Value: "s3.eu-central-1.s4.mega.io",
Help: "Mega S4 eu-central-1 (Amsterdam)",
Provider: "Mega",
}, {
Value: "s3.eu-central-2.s4.mega.io",
Help: "Mega S4 eu-central-2 (Bettembourg)",
Provider: "Mega",
}, {
Value: "s3.ca-central-1.s4.mega.io",
Help: "Mega S4 ca-central-1 (Montreal)",
Provider: "Mega",
}, {
Value: "s3.ca-west-1.s4.mega.io",
Help: "Mega S4 ca-west-1 (Vancouver)",
Provider: "Mega",
}},
}, {
Name: "location_constraint",
@@ -1908,7 +1927,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox",
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -1923,7 +1942,7 @@ doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
`,
Provider: "!Storj,Selectel,Synology,Cloudflare",
Provider: "!Storj,Selectel,Synology,Cloudflare,FlashBlade,Mega",
Examples: []fs.OptionExample{{
Value: "default",
Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
@@ -1981,6 +2000,7 @@ isn't set then "acl" is used instead.
If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl:
header is added and the default (private) will be used.
`,
Provider: "!Storj,Selectel,Synology,Cloudflare,FlashBlade",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "private",
@@ -3116,6 +3136,9 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
if f.opt.DirectoryMarkers && strings.HasSuffix(bucketPath, "//") {
bucketPath = bucketPath[:len(bucketPath)-1]
}
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
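When directory markers are enabled, the marker object for a directory ends in "/", so joining it with the root could previously leave a double slash on the bucket path; the check above drops the extra one. A tiny illustration with a hypothetical value (assumes "strings" and "fmt" are imported):
p := "dir//" // marker path that picked up a second trailing slash
if strings.HasSuffix(p, "//") {
	p = p[:len(p)-1]
}
fmt.Println(p) // prints "dir/"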
@@ -3495,6 +3518,9 @@ func setQuirks(opt *Options) {
case "Dreamhost":
urlEncodeListings = false
useAlreadyExists = false // untested
case "FlashBlade":
mightGzip = false // Never auto gzips objects
virtualHostStyle = false // supports vhost but defaults to paths
case "IBMCOS":
listObjectsV2 = false // untested
virtualHostStyle = false
@@ -3527,6 +3553,14 @@ func setQuirks(opt *Options) {
urlEncodeListings = false
useMultipartEtag = false
useAlreadyExists = false
case "Mega":
listObjectsV2 = true
virtualHostStyle = false
urlEncodeListings = true
useMultipartEtag = false
useAlreadyExists = false
// Multipart server side copies not supported
opt.CopyCutoff = math.MaxInt64
case "Minio":
virtualHostStyle = false
case "Netease":
@@ -3597,6 +3631,8 @@ func setQuirks(opt *Options) {
urlEncodeListings = false
virtualHostStyle = false
useAlreadyExists = false // untested
case "Exaba":
virtualHostStyle = false
case "GCS":
// Google break request Signature by mutating accept-encoding HTTP header
// https://github.com/rclone/rclone/issues/6670
@@ -4425,7 +4461,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
remote = remote[len(opt.prefix):]
if isDirectory {
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
remote, _ = strings.CutSuffix(remote, "/")
}
if opt.addBucket {
remote = bucket.Join(opt.bucket, remote)
@@ -4481,7 +4517,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *types.Ob
}
// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
// List the objects and directories
err = f.list(ctx, listOpt{
bucket: bucket,
@@ -4497,16 +4533,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return err
}
if entry != nil {
entries = append(entries, entry)
return callback(entry)
}
return nil
})
if err != nil {
return nil, err
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return entries, nil
return nil
}
// listBuckets lists the buckets to out
@@ -4539,14 +4575,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
}
return f.listBuckets(ctx)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
return list.Flush()
}
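ListP streams entries to the callback in batches instead of building one large slice, with List now delegating to it via list.WithListP. A simplified stand-in for the helper pattern used here - not the actual fs/list.Helper, whose batch size and internals may differ:
// entryBatcher buffers entries and hands them to the callback in batches;
// Flush sends whatever is left. Assumes import "github.com/rclone/rclone/fs".
type entryBatcher struct {
	callback fs.ListRCallback
	buf      fs.DirEntries
}

func (b *entryBatcher) Add(entry fs.DirEntry) error {
	b.buf = append(b.buf, entry)
	if len(b.buf) >= 100 {
		return b.Flush()
	}
	return nil
}

func (b *entryBatcher) Flush() error {
	if len(b.buf) == 0 {
		return nil
	}
	err := b.callback(b.buf)
	b.buf = nil
	return err
}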
// ListR lists the objects and directories of the Fs starting
@@ -4567,7 +4635,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, listOpt{
bucket: bucket,
@@ -4708,7 +4776,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
remote, _ = strings.CutSuffix(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
@@ -5061,7 +5129,7 @@ or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Fre
Usage Examples:
rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket/path/to/ --include /object -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY
@@ -6843,6 +6911,7 @@ var (
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.OpenChunkWriter = &Fs{}

View File

@@ -10,6 +10,7 @@ import (
"fmt"
"io"
iofs "io/fs"
"net/url"
"os"
"path"
"regexp"
@@ -482,6 +483,14 @@ Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: "http_proxy",
Default: "",
Help: `URL for HTTP CONNECT proxy
Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
`,
Advanced: true,
}, {
Name: "copy_is_hardlink",
Default: false,
@@ -545,6 +554,7 @@ type Options struct {
HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
SSH fs.SpaceSepList `config:"ssh"`
SocksProxy string `config:"socks_proxy"`
HTTPProxy string `config:"http_proxy"`
CopyIsHardlink bool `config:"copy_is_hardlink"`
}
@@ -570,6 +580,7 @@ type Fs struct {
savedpswd string
sessions atomic.Int32 // count in use sessions
tokens *pacer.TokenDispenser
proxyURL *url.URL // address of HTTP proxy read from environment
}
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -867,6 +878,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt.Port = "22"
}
// get proxy URL if set
if opt.HTTPProxy != "" {
proxyURL, err := url.Parse(opt.HTTPProxy)
if err != nil {
return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
}
f.proxyURL = proxyURL
}
sshConfig := &ssh.ClientConfig{
User: opt.User,
Auth: []ssh.AuthMethod{},

View File

@@ -31,6 +31,8 @@ func (f *Fs) newSSHClientInternal(ctx context.Context, network, addr string, ssh
)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, addr, f.opt.SocksProxy, baseDialer)
} else if f.proxyURL != nil {
conn, err = proxy.HTTPConnectDial(network, addr, f.proxyURL, baseDialer)
} else {
conn, err = baseDialer.Dial(network, addr)
}
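An HTTP CONNECT dial opens a TCP connection to the proxy, asks it to tunnel to the target host:port, and hands back the raw connection once the proxy replies 200. A minimal sketch under those assumptions - this is not rclone's proxy.HTTPConnectDial, just an illustration (imports: bufio, fmt, net, net/http, net/url):
// Sketch: plain (non-TLS) proxy, no proxy authentication.
func httpConnectDial(network, addr string, proxyURL *url.URL, dialer *net.Dialer) (net.Conn, error) {
	conn, err := dialer.Dial(network, proxyURL.Host)
	if err != nil {
		return nil, err
	}
	req := &http.Request{
		Method: http.MethodConnect,
		URL:    &url.URL{Opaque: addr},
		Host:   addr,
		Header: make(http.Header),
	}
	if err := req.Write(conn); err != nil {
		_ = conn.Close()
		return nil, err
	}
	resp, err := http.ReadResponse(bufio.NewReader(conn), req)
	if err != nil {
		_ = conn.Close()
		return nil, err
	}
	_ = resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		_ = conn.Close()
		return nil, fmt.Errorf("proxy CONNECT failed: %s", resp.Status)
	}
	return conn, nil // tunnelled connection, ready for the SSH handshake
}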

View File

@@ -25,8 +25,8 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
@@ -846,7 +846,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
container, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(container, directory, prefix string, addContainer bool) error {
return f.list(ctx, container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
return list.Add(entry)

View File

@@ -1020,6 +1020,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
}
// Disable ListP always
features.ListP = nil
// show that we wrap other backends
features.Overlay = true

View File

@@ -12,7 +12,7 @@ import (
)
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt", "OpenChunkWriter"}
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt", "OpenChunkWriter", "ListP"}
unimplementableObjectMethods = []string{}
)

View File

@@ -82,22 +82,37 @@ type Prop struct {
// Parse a status of the form "HTTP/1.1 200 OK" or "HTTP/1.1 200"
var parseStatus = regexp.MustCompile(`^HTTP/[0-9.]+\s+(\d+)`)
// StatusOK examines the Status and returns an OK flag
func (p *Prop) StatusOK() bool {
// Assume OK if no statuses received
// Code extracts the status code from the first status
func (p *Prop) Code() int {
if len(p.Status) == 0 {
return true
return -1
}
match := parseStatus.FindStringSubmatch(p.Status[0])
if len(match) < 2 {
return false
return 0
}
code, err := strconv.Atoi(match[1])
if err != nil {
return 0
}
return code
}
// StatusOK examines the Status and returns an OK flag
func (p *Prop) StatusOK() bool {
// Fetch status code as int
c := p.Code()
// Assume OK if no statuses received
if c == -1 {
return true
}
if c == 0 {
return false
}
if code >= 200 && code < 300 {
if c >= 200 && c < 300 {
return true
}
return false
}
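Splitting the parsing into Code makes it possible to special-case individual statuses (such as 425, used further down). A hypothetical use, assuming Prop.Status is a plain string slice:
p := Prop{Status: []string{"HTTP/1.1 425 Too Early"}}
fmt.Println(p.Code())     // 425
fmt.Println(p.StatusOK()) // false - 425 is outside the 2xx range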

View File

@@ -0,0 +1,40 @@
package webdav
import (
"errors"
"fmt"
)
var (
// ErrChunkSize is returned when the chunk size is zero
ErrChunkSize = errors.New("tus chunk size must be greater than zero")
// ErrNilLogger is returned when the logger is nil
ErrNilLogger = errors.New("tus logger can't be nil")
// ErrNilStore is returned when the store is nil
ErrNilStore = errors.New("tus store can't be nil if resume is enabled")
// ErrNilUpload is returned when the upload is nil
ErrNilUpload = errors.New("tus upload can't be nil")
// ErrLargeUpload is returned when the upload body is too large
ErrLargeUpload = errors.New("tus upload body is too large")
// ErrVersionMismatch is returned when the tus protocol version is mismatching
ErrVersionMismatch = errors.New("tus protocol version mismatch")
// ErrOffsetMismatch is returned when the tus upload offset is mismatching
ErrOffsetMismatch = errors.New("tus upload offset mismatch")
// ErrUploadNotFound is returned when the tus upload is not found
ErrUploadNotFound = errors.New("tus upload not found")
// ErrResumeNotEnabled is returned when the tus resuming is not enabled
ErrResumeNotEnabled = errors.New("tus resuming not enabled")
// ErrFingerprintNotSet is returned when the tus fingerprint is not set
ErrFingerprintNotSet = errors.New("tus fingerprint not set")
)
// ClientError represents an error state of a client
type ClientError struct {
Code int
Body []byte
}
// Error returns an error string containing the client error code
func (c ClientError) Error() string {
return fmt.Sprintf("unexpected status code: %d", c.Code)
}

View File

@@ -0,0 +1,88 @@
package webdav
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"strings"
)
// Metadata is a typedef for a string to string map to hold metadata
type Metadata map[string]string
// Upload is a struct containing the file status during upload
type Upload struct {
stream io.ReadSeeker
size int64
offset int64
Fingerprint string
Metadata Metadata
}
// Updates the Upload information based on offset.
func (u *Upload) updateProgress(offset int64) {
u.offset = offset
}
// Finished returns whether this upload is finished or not.
func (u *Upload) Finished() bool {
return u.offset >= u.size
}
// Progress returns the progress in a percentage.
func (u *Upload) Progress() int64 {
return (u.offset * 100) / u.size
}
// Offset returns the current upload offset.
func (u *Upload) Offset() int64 {
return u.offset
}
// Size returns the size of the upload body.
func (u *Upload) Size() int64 {
return u.size
}
// EncodedMetadata encodes the upload metadata.
func (u *Upload) EncodedMetadata() string {
var encoded []string
for k, v := range u.Metadata {
encoded = append(encoded, fmt.Sprintf("%s %s", k, b64encode(v)))
}
return strings.Join(encoded, ",")
}
func b64encode(s string) string {
return base64.StdEncoding.EncodeToString([]byte(s))
}
// NewUpload creates a new upload from an io.Reader.
func NewUpload(reader io.Reader, size int64, metadata Metadata, fingerprint string) *Upload {
stream, ok := reader.(io.ReadSeeker)
if !ok {
buf := new(bytes.Buffer)
_, err := buf.ReadFrom(reader)
if err != nil {
return nil
}
stream = bytes.NewReader(buf.Bytes())
}
if metadata == nil {
metadata = make(Metadata)
}
return &Upload{
stream: stream,
size: size,
Fingerprint: fingerprint,
Metadata: metadata,
}
}
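EncodedMetadata follows the tus convention: each value is base64-encoded and the "key value" pairs are joined with commas. A hypothetical example (assumes "strings" and "fmt" are imported):
u := NewUpload(strings.NewReader("hello"), 5, Metadata{"filename": "a.txt"}, "")
fmt.Println(u.EncodedMetadata()) // prints: filename YS50eHQ=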

View File

@@ -0,0 +1,191 @@
package webdav
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
// Uploader holds all information about a currently running upload
type Uploader struct {
fs *Fs
url string
upload *Upload
offset int64
aborted bool
uploadSubs []chan Upload
notifyChan chan bool
overridePatchMethod bool
}
// NotifyUploadProgress subscribes to progress updates.
func (u *Uploader) NotifyUploadProgress(c chan Upload) {
u.uploadSubs = append(u.uploadSubs, c)
}
func (f *Fs) shouldRetryChunk(ctx context.Context, resp *http.Response, err error, newOff *int64) (bool, error) {
if resp == nil {
return true, err
}
switch resp.StatusCode {
case 204:
if off, err := strconv.ParseInt(resp.Header.Get("Upload-Offset"), 10, 64); err == nil {
*newOff = off
return false, nil
}
return false, err
case 409:
return false, ErrOffsetMismatch
case 412:
return false, ErrVersionMismatch
case 413:
return false, ErrLargeUpload
}
return f.shouldRetry(ctx, resp, err)
}
func (u *Uploader) uploadChunk(ctx context.Context, body io.Reader, size int64, offset int64, options ...fs.OpenOption) (int64, error) {
var method string
if !u.overridePatchMethod {
method = "PATCH"
} else {
method = "POST"
}
extraHeaders := map[string]string{} // FIXME: Use extraHeaders(ctx, src) from Object maybe?
extraHeaders["Upload-Offset"] = strconv.FormatInt(offset, 10)
extraHeaders["Tus-Resumable"] = "1.0.0"
extraHeaders["filetype"] = u.upload.Metadata["filetype"]
if u.overridePatchMethod {
extraHeaders["X-HTTP-Method-Override"] = "PATCH"
}
url, err := url.Parse(u.url)
if err != nil {
return 0, fmt.Errorf("upload Chunk failed, could not parse url")
}
// FIXME: Use GetBody func as in chunking.go
opts := rest.Opts{
Method: method,
Path: url.Path,
NoResponse: true,
RootURL: fmt.Sprintf("%s://%s", url.Scheme, url.Host),
ContentLength: &size,
Body: body,
ContentType: "application/offset+octet-stream",
ExtraHeaders: extraHeaders,
Options: options,
}
var newOffset int64
err = u.fs.pacer.CallNoRetry(func() (bool, error) {
res, err := u.fs.srv.Call(ctx, &opts)
return u.fs.shouldRetryChunk(ctx, res, err, &newOffset)
})
if err != nil {
return 0, fmt.Errorf("uploadChunk failed: %w", err)
// FIXME What do we do here? Remove the entire upload?
// See https://github.com/tus/tusd/issues/176
}
return newOffset, nil
}
// Upload uploads the entire body to the server.
func (u *Uploader) Upload(ctx context.Context, options ...fs.OpenOption) error {
cnt := 1
fs.Debug(u.fs, "Uploaded starts")
for u.offset < u.upload.size && !u.aborted {
err := u.UploadChunk(ctx, cnt, options...)
cnt++
if err != nil {
return err
}
}
fs.Debug(u.fs, "-- Uploaded finished")
return nil
}
// UploadChunk uploads a single chunk.
func (u *Uploader) UploadChunk(ctx context.Context, cnt int, options ...fs.OpenOption) error {
chunkSize := u.fs.opt.ChunkSize
data := make([]byte, chunkSize)
_, err := u.upload.stream.Seek(u.offset, 0)
if err != nil {
fs.Errorf(u.fs, "Chunk %d: Error seek in stream failed: %v", cnt, err)
return err
}
size, err := u.upload.stream.Read(data)
if err != nil {
fs.Errorf(u.fs, "Chunk %d: Error: Can not read from data strem: %v", cnt, err)
return err
}
body := bytes.NewBuffer(data[:size])
newOffset, err := u.uploadChunk(ctx, body, int64(size), u.offset, options...)
if err == nil {
fs.Debugf(u.fs, "Uploaded chunk no %d ok, range %d -> %d", cnt, u.offset, newOffset)
} else {
fs.Errorf(u.fs, "Uploaded chunk no %d failed: %v", cnt, err)
return err
}
u.offset = newOffset
u.upload.updateProgress(u.offset)
u.notifyChan <- true
return nil
}
// Waits for a signal to broadcast to all subscribers
func (u *Uploader) broadcastProgress() {
for range u.notifyChan {
for _, c := range u.uploadSubs {
c <- *u.upload
}
}
}
// NewUploader creates a new Uploader.
func NewUploader(f *Fs, url string, upload *Upload, offset int64) *Uploader {
notifyChan := make(chan bool)
uploader := &Uploader{
f,
url,
upload,
offset,
false,
nil,
notifyChan,
false,
}
go uploader.broadcastProgress()
return uploader
}

108
backend/webdav/tus.go Normal file
View File

@@ -0,0 +1,108 @@
package webdav
/*
Chunked upload based on the tus protocol for ownCloud Infinite Scale
See https://tus.io/protocols/resumable-upload
*/
import (
"context"
"fmt"
"io"
"net/http"
"path/filepath"
"strconv"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
func (o *Object) updateViaTus(ctx context.Context, in io.Reader, contentType string, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
fn := filepath.Base(src.Remote())
metadata := map[string]string{
"filename": fn,
"mtime": strconv.FormatInt(src.ModTime(ctx).Unix(), 10),
"filetype": contentType,
}
// Fingerprint is used to identify the upload when resuming. That is not yet implemented
fingerprint := ""
// create an upload from a file.
upload := NewUpload(in, src.Size(), metadata, fingerprint)
// create the uploader.
uploader, err := o.CreateUploader(ctx, upload, options...)
if err == nil {
// start the uploading process.
err = uploader.Upload(ctx, options...)
}
return err
}
func (f *Fs) getTusLocationOrRetry(ctx context.Context, resp *http.Response, err error) (bool, string, error) {
switch resp.StatusCode {
case 201:
location := resp.Header.Get("Location")
return false, location, nil
case 412:
return false, "", ErrVersionMismatch
case 413:
return false, "", ErrLargeUpload
}
retry, err := f.shouldRetry(ctx, resp, err)
return retry, "", err
}
// CreateUploader creates a new upload to the server.
func (o *Object) CreateUploader(ctx context.Context, u *Upload, options ...fs.OpenOption) (*Uploader, error) {
if u == nil {
return nil, ErrNilUpload
}
// if c.Config.Resume && len(u.Fingerprint) == 0 {
// return nil, ErrFingerprintNotSet
// }
l := int64(0)
p := o.filePath()
// cut the filename off
dir, _ := filepath.Split(p)
if dir == "" {
dir = "/"
}
opts := rest.Opts{
Method: "POST",
Path: dir,
NoResponse: true,
RootURL: o.fs.endpointURL,
ContentLength: &l,
ExtraHeaders: o.extraHeaders(ctx, o),
Options: options,
}
opts.ExtraHeaders["Upload-Length"] = strconv.FormatInt(u.size, 10)
opts.ExtraHeaders["Upload-Metadata"] = u.EncodedMetadata()
opts.ExtraHeaders["Tus-Resumable"] = "1.0.0"
// opts.ExtraHeaders["mtime"] = strconv.FormatInt(src.ModTime(ctx).Unix(), 10)
var tusLocation string
// rclone http call
err := o.fs.pacer.CallNoRetry(func() (bool, error) {
var retry bool
res, err := o.fs.srv.Call(ctx, &opts)
retry, tusLocation, err = o.fs.getTusLocationOrRetry(ctx, res, err)
return retry, err
})
if err != nil {
return nil, fmt.Errorf("making upload directory failed: %w", err)
}
uploader := NewUploader(o.fs, tusLocation, u, 0)
return uploader, nil
}
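Roughly, the creation request built above looks like the following, with the 201 response carrying the upload URL in its Location header for the subsequent PATCH chunks (the path, length and metadata values are made up, and any extra ownCloud headers are omitted):
POST /<parent-directory-of-target>/ HTTP/1.1
Content-Length: 0
Tus-Resumable: 1.0.0
Upload-Length: 1048576
Upload-Metadata: filename aGVsbG8udHh0,mtime MTcwMDAwMDAwMA==,filetype dGV4dC9wbGFpbg==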

View File

@@ -84,7 +84,10 @@ func init() {
Help: "Nextcloud",
}, {
Value: "owncloud",
Help: "Owncloud",
Help: "Owncloud 10 PHP based WebDAV server",
}, {
Value: "infinitescale",
Help: "ownCloud Infinite Scale",
}, {
Value: "sharepoint",
Help: "Sharepoint Online, authenticated by Microsoft account",
@@ -212,6 +215,7 @@ type Fs struct {
pacer *fs.Pacer // pacer for API calls
precision time.Duration // mod time precision
canStream bool // set if can stream
canTus bool // supports the TUS upload protocol
useOCMtime bool // set if can use X-OC-Mtime
propsetMtime bool // set if can use propset
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
@@ -262,6 +266,7 @@ func (f *Fs) Features() *fs.Features {
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
423, // Locked
425, // Too Early
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
@@ -373,7 +378,8 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
return nil, fs.ErrorObjectNotFound
}
item := result.Responses[0]
if !item.Props.StatusOK() {
// status code 425 is accepted here as well
if !(item.Props.StatusOK() || item.Props.Code() == 425) {
return nil, fs.ErrorObjectNotFound
}
if itemIsDir(&item) {
@@ -630,6 +636,15 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
f.propsetMtime = true
f.hasOCMD5 = true
f.hasOCSHA1 = true
case "infinitescale":
f.precision = time.Second
f.useOCMtime = true
f.propsetMtime = true
f.hasOCMD5 = false
f.hasOCSHA1 = true
f.canChunk = false
f.canTus = true
f.opt.ChunkSize = 10 * fs.Mebi
case "nextcloud":
f.precision = time.Second
f.useOCMtime = true
@@ -1327,7 +1342,7 @@ func (o *Object) Size() int64 {
ctx := context.TODO()
err := o.readMetaData(ctx)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
fs.Infof(o, "Failed to read metadata: %v", err)
return 0
}
return o.size
@@ -1371,7 +1386,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
fs.Infof(o, "Failed to read metadata: %v", err)
return time.Now()
}
return o.modTime
@@ -1497,9 +1512,21 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return fmt.Errorf("Update mkParentDir failed: %w", err)
}
if o.shouldUseChunkedUpload(src) {
fs.Debugf(src, "Update will use the chunked upload strategy")
err = o.updateChunked(ctx, in, src, options...)
if o.fs.canTus { // supports the tus upload protocol, ie. InfiniteScale
fs.Debugf(src, "Update will use the tus protocol to upload")
contentType := fs.MimeType(ctx, src)
err = o.updateViaTus(ctx, in, contentType, src, options...)
if err != nil {
fs.Debug(src, "tus update failed.")
return fmt.Errorf("tus update failed: %w", err)
}
} else if o.shouldUseChunkedUpload(src) {
if o.fs.opt.Vendor == "nextcloud" {
fs.Debugf(src, "Update will use the chunked upload strategy")
err = o.updateChunked(ctx, in, src, options...)
} else {
fs.Debug(src, "Chunking - unknown vendor")
}
if err != nil {
return err
}
@@ -1511,10 +1538,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// TODO: define getBody() to enable low-level HTTP/2 retries
err = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)
if err != nil {
return err
return fmt.Errorf("unchunked simple update failed: %w", err)
}
}
// read metadata from remote
o.hasMetaData = false
return o.readMetaData(ctx)
@@ -1524,7 +1550,7 @@ func (o *Object) extraHeaders(ctx context.Context, src fs.ObjectInfo) map[string
extraHeaders := map[string]string{}
if o.fs.useOCMtime || o.fs.hasOCMD5 || o.fs.hasOCSHA1 {
if o.fs.useOCMtime {
extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", o.modTime.Unix())
}
// Set one upload checksum
// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5

137
bin/check_autogenerated_edits.py Executable file
View File

@@ -0,0 +1,137 @@
#!/usr/bin/env python3
"""
This script checks for unauthorized modifications in autogenerated sections of markdown files.
It is designed to be used in a GitHub Actions workflow or a local pre-commit hook.
Features:
- Detects markdown files changed in the last commit.
- Identifies modified autogenerated sections marked by specific comments.
- Reports violations using GitHub Actions error messages.
- Exits with a nonzero status code if unauthorized changes are found.
It currently only checks the last commit.
"""
import re
import subprocess
import sys
def run_git(args):
"""
Run a Git command with the provided arguments and return its output as a string.
"""
return subprocess.run(["git"] + args, stdout=subprocess.PIPE, text=True, check=True).stdout.strip()
def get_changed_files():
"""
Retrieve a list of markdown files that were changed in the last commit.
"""
files = run_git(["diff", "--name-only", "HEAD~1", "HEAD"]).splitlines()
return [f for f in files if f.endswith(".md")]
def get_diff(file):
"""
Get the diff of a given file between the last commit and the current version.
"""
return run_git(["diff", "-U0", "HEAD~1", "HEAD", "--", file]).splitlines()
def get_file_content(ref, file):
"""
Retrieve the content of a file from a given Git reference.
"""
try:
return run_git(["show", f"{ref}:{file}"]).splitlines()
except Exception:
return []
def find_regions(lines):
"""
Identify the start and end line numbers of autogenerated regions in a file.
"""
regions = []
start = None
for i, line in enumerate(lines, 1):
if "rem autogenerated options start" in line:
start = i
elif "rem autogenerated options stop" in line and start is not None:
regions.append((start, i))
start = None
return regions
def in_region(ln, regions):
"""
Check if a given line number falls within an autogenerated region.
"""
return any(start <= ln <= end for start, end in regions)
def show_error(file_name, line, message):
"""
Print an error message in a GitHub Actions-compatible format.
"""
print(f"::error file={file_name},line={line}::{message} at {file_name} line {line}")
def check_file(file):
"""
Check a markdown file for modifications in autogenerated regions.
"""
viol = False
new_lines = get_file_content("HEAD", file)
old_lines = get_file_content("HEAD~1", file)
# If old file did not exist or was empty then don't check
if not old_lines:
return
# Entire autogenerated file check.
if any("autogenerated - DO NOT EDIT" in l for l in new_lines[:10]):
if get_diff(file):
show_error(file, 1, "Autogenerated file modified")
return True
return False
# Partial autogenerated regions.
regions_new = find_regions(new_lines)
regions_old = find_regions(old_lines)
diff = get_diff(file)
hunk_re = re.compile(r"^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@")
new_ln = old_ln = None
for line in diff:
if line.startswith("@@"):
m = hunk_re.match(line)
if m:
old_ln = int(m.group(1))
new_ln = int(m.group(3))
elif new_ln is None:
continue
elif line.startswith("+"):
if in_region(new_ln, regions_new):
show_error(file, new_ln, "Autogenerated region of file modified")
viol = True
new_ln += 1
elif line.startswith("-"):
if in_region(old_ln, regions_old):
show_error(file, old_ln, "Autogenerated region of file modified")
viol = True
old_ln += 1
else:
new_ln += 1
old_ln += 1
return viol
def main():
"""
Main function that iterates over changed files and checks them for violations.
"""
found = False
for f in get_changed_files():
if check_file(f):
found = True
if found:
sys.exit(1)
print("No unauthorized edits found in autogenerated sections.")
sys.exit(0)
if __name__ == "__main__":
main()
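The script takes no arguments: it inspects the last commit via git and exits non-zero when an autogenerated region was modified, so it can back a CI job or a pre-commit hook. For example, from the repository root:
python3 bin/check_autogenerated_edits.py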

View File

@@ -41,8 +41,10 @@ docs = [
"crypt.md",
"compress.md",
"combine.md",
"doi.md",
"dropbox.md",
"filefabric.md",
"filelu.md",
"filescom.md",
"ftp.md",
"gofile.md",

View File

@@ -53,6 +53,15 @@ import (
_ "github.com/rclone/rclone/cmd/rmdirs"
_ "github.com/rclone/rclone/cmd/selfupdate"
_ "github.com/rclone/rclone/cmd/serve"
_ "github.com/rclone/rclone/cmd/serve/dlna"
_ "github.com/rclone/rclone/cmd/serve/docker"
_ "github.com/rclone/rclone/cmd/serve/ftp"
_ "github.com/rclone/rclone/cmd/serve/http"
_ "github.com/rclone/rclone/cmd/serve/nfs"
_ "github.com/rclone/rclone/cmd/serve/restic"
_ "github.com/rclone/rclone/cmd/serve/s3"
_ "github.com/rclone/rclone/cmd/serve/sftp"
_ "github.com/rclone/rclone/cmd/serve/webdav"
_ "github.com/rclone/rclone/cmd/settier"
_ "github.com/rclone/rclone/cmd/sha1sum"
_ "github.com/rclone/rclone/cmd/size"

View File

@@ -23,19 +23,23 @@ func init() {
}
var commandDefinition = &cobra.Command{
Use: "authorize",
Use: "authorize <fs name> [base64_json_blob | client_id client_secret]",
Short: `Remote authorization.`,
Long: `Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.
The command requires 1-3 arguments:
- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service
Use --auth-no-open-browser to prevent rclone from opening the auth
link in the default browser automatically.
Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
Annotations: map[string]string{
"versionIntroduced": "v1.27",
// "groups": "",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 3, command, args)

View File

@@ -0,0 +1,32 @@
package authorize
import (
"bytes"
"strings"
"testing"
"github.com/spf13/cobra"
)
func TestAuthorizeCommand(t *testing.T) {
// Test that the Use string is correctly formatted
if commandDefinition.Use != "authorize <fs name> [base64_json_blob | client_id client_secret]" {
t.Errorf("Command Use string doesn't match expected format: %s", commandDefinition.Use)
}
// Test that help output contains the argument information
buf := &bytes.Buffer{}
cmd := &cobra.Command{}
cmd.AddCommand(commandDefinition)
cmd.SetOut(buf)
cmd.SetArgs([]string{"authorize", "--help"})
err := cmd.Execute()
if err != nil {
t.Fatalf("Failed to execute help command: %v", err)
}
helpOutput := buf.String()
if !strings.Contains(helpOutput, "authorize <fs name>") {
t.Errorf("Help output doesn't contain correct usage information")
}
}

View File

@@ -5,8 +5,6 @@ import (
"os"
"sort"
"strconv"
"strings"
"time"
)
// Names comprises a set of file names
@@ -85,81 +83,3 @@ func (am AliasMap) Alias(name1 string) string {
}
return name1
}
// ParseGlobs determines whether a string contains {brackets}
// and returns the substring (including both brackets) for replacing
// substring is first opening bracket to last closing bracket --
// good for {{this}} but not {this}{this}
func ParseGlobs(s string) (hasGlobs bool, substring string) {
open := strings.Index(s, "{")
close := strings.LastIndex(s, "}")
if open >= 0 && close > open {
return true, s[open : close+1]
}
return false, ""
}
// TrimBrackets converts {{this}} to this
func TrimBrackets(s string) string {
return strings.Trim(s, "{}")
}
// TimeFormat converts a user-supplied string to a Go time constant, if possible
func TimeFormat(timeFormat string) string {
switch timeFormat {
case "Layout":
timeFormat = time.Layout
case "ANSIC":
timeFormat = time.ANSIC
case "UnixDate":
timeFormat = time.UnixDate
case "RubyDate":
timeFormat = time.RubyDate
case "RFC822":
timeFormat = time.RFC822
case "RFC822Z":
timeFormat = time.RFC822Z
case "RFC850":
timeFormat = time.RFC850
case "RFC1123":
timeFormat = time.RFC1123
case "RFC1123Z":
timeFormat = time.RFC1123Z
case "RFC3339":
timeFormat = time.RFC3339
case "RFC3339Nano":
timeFormat = time.RFC3339Nano
case "Kitchen":
timeFormat = time.Kitchen
case "Stamp":
timeFormat = time.Stamp
case "StampMilli":
timeFormat = time.StampMilli
case "StampMicro":
timeFormat = time.StampMicro
case "StampNano":
timeFormat = time.StampNano
case "DateTime":
// timeFormat = time.DateTime // missing in go1.19
timeFormat = "2006-01-02 15:04:05"
case "DateOnly":
// timeFormat = time.DateOnly // missing in go1.19
timeFormat = "2006-01-02"
case "TimeOnly":
// timeFormat = time.TimeOnly // missing in go1.19
timeFormat = "15:04:05"
case "MacFriendlyTime", "macfriendlytime", "mac":
timeFormat = "2006-01-02 0304PM" // not actually a Go constant -- but useful as macOS filenames can't have colons
}
return timeFormat
}
// AppyTimeGlobs converts "myfile-{DateOnly}.txt" to "myfile-2006-01-02.txt"
func AppyTimeGlobs(s string, t time.Time) string {
hasGlobs, substring := ParseGlobs(s)
if !hasGlobs {
return s
}
timeString := t.Local().Format(TimeFormat(TrimBrackets(substring)))
return strings.ReplaceAll(s, substring, timeString)
}

View File

@@ -3,20 +3,22 @@ package bilib
import (
"bytes"
"log"
"log/slog"
"github.com/sirupsen/logrus"
"github.com/rclone/rclone/fs/log"
)
// CaptureOutput runs a function capturing its output.
// CaptureOutput runs a function capturing its output at log level INFO.
func CaptureOutput(fun func()) []byte {
logSave := log.Writer()
logrusSave := logrus.StandardLogger().Out
buf := &bytes.Buffer{}
log.SetOutput(buf)
logrus.SetOutput(buf)
oldLevel := log.Handler.SetLevel(slog.LevelInfo)
log.Handler.SetOutput(func(level slog.Level, text string) {
buf.WriteString(text)
})
defer func() {
log.Handler.ResetOutput()
log.Handler.SetLevel(oldLevel)
}()
fun()
log.SetOutput(logSave)
logrus.SetOutput(logrusSave)
return buf.Bytes()
}
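A hypothetical use, capturing the INFO-level output produced by the wrapped function:
out := CaptureOutput(func() {
	fs.Infof(nil, "hello from the captured function")
})
fmt.Printf("%s", out)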

View File

@@ -4,8 +4,6 @@ import (
"context"
"fmt"
"math"
"mime"
"path"
"strings"
"time"
@@ -13,6 +11,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/terminal"
"github.com/rclone/rclone/lib/transform"
)
// Prefer describes strategies for resolving sync conflicts
@@ -97,8 +96,8 @@ func (b *bisyncRun) setResolveDefaults(ctx context.Context) error {
}
// replace glob variables, if any
t := time.Now() // capture static time here so it is the same for all files throughout this run
b.opt.ConflictSuffix1 = bilib.AppyTimeGlobs(b.opt.ConflictSuffix1, t)
b.opt.ConflictSuffix2 = bilib.AppyTimeGlobs(b.opt.ConflictSuffix2, t)
b.opt.ConflictSuffix1 = transform.AppyTimeGlobs(b.opt.ConflictSuffix1, t)
b.opt.ConflictSuffix2 = transform.AppyTimeGlobs(b.opt.ConflictSuffix2, t)
// append dot (intentionally allow more than one)
b.opt.ConflictSuffix1 = "." + b.opt.ConflictSuffix1
@@ -130,6 +129,7 @@ type (
path2 namePair
}
)
type namePair struct {
oldName string
newName string
@@ -240,24 +240,7 @@ func SuffixName(ctx context.Context, remote, suffix string) string {
}
ci := fs.GetConfig(ctx)
if ci.SuffixKeepExtension {
var (
base = remote
exts = ""
first = true
ext = path.Ext(remote)
)
for ext != "" {
// Look second and subsequent extensions in mime types.
// If they aren't found then don't keep it as an extension.
if !first && mime.TypeByExtension(ext) == "" {
break
}
base = base[:len(base)-len(ext)]
exts = ext + exts
first = false
ext = path.Ext(base)
}
return base + suffix + exts
return transform.SuffixKeepExtension(remote, suffix)
}
return remote + suffix
}
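Illustrative behaviour with made-up names: when --suffix-keep-extension is set the suffix is inserted before the extension, otherwise it is appended to the full name:
// SuffixName(ctx, "photo.jpg", "-conflict1") -> "photo-conflict1.jpg"   (--suffix-keep-extension)
// SuffixName(ctx, "photo.jpg", "-conflict1") -> "photo.jpg-conflict1"   (default)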

View File

@@ -3,502 +3,106 @@ package convmv
import (
"context"
"encoding/base64"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"unicode/utf8"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/fs/sync"
"github.com/rclone/rclone/lib/transform"
"github.com/spf13/cobra"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/unicode/norm"
)
// Globals
var (
Opt ConvOpt
Cmaps = map[int]*charmap.Charmap{}
deleteEmptySrcDirs = false
createEmptySrcDirs = false
)
// ConvOpt sets the conversion options
type ConvOpt struct {
ctx context.Context
f fs.Fs
ConvertAlgo Convert
FindReplace []string
Prefix string
Suffix string
Max int
Enc encoder.MultiEncoder
CmapFlag fs.Enum[cmapChoices]
Cmap *charmap.Charmap
List bool
}
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.FVarP(cmdFlags, &Opt.ConvertAlgo, "conv", "t", "Conversion algorithm: "+Opt.ConvertAlgo.Help(), "")
flags.StringVarP(cmdFlags, &Opt.Prefix, "prefix", "", "", "In 'prefix' or 'trimprefix' mode, append or trim this prefix", "")
flags.StringVarP(cmdFlags, &Opt.Suffix, "suffix", "", "", "In 'suffix' or 'trimsuffix' mode, append or trim this suffix", "")
flags.IntVarP(cmdFlags, &Opt.Max, "max", "m", -1, "In 'truncate' mode, truncate all path segments longer than this many characters", "")
flags.StringArrayVarP(cmdFlags, &Opt.FindReplace, "replace", "r", nil, "In 'replace' mode, this is a pair of find,replace values (can repeat flag more than once)", "")
flags.FVarP(cmdFlags, &Opt.Enc, "encoding", "", "Custom backend encoding: (use --list to see full list)", "")
flags.FVarP(cmdFlags, &Opt.CmapFlag, "charmap", "", "Other character encoding (use --list to see full list) ", "")
flags.BoolVarP(cmdFlags, &Opt.List, "list", "", false, "Print full list of options", "")
}
// Convert describes conversion setting
type Convert = fs.Enum[convertChoices]
// Supported conversion options
const (
ConvNone Convert = iota
ConvToNFC
ConvToNFD
ConvToNFKC
ConvToNFKD
ConvFindReplace
ConvPrefix
ConvSuffix
ConvTrimPrefix
ConvTrimSuffix
ConvIndex
ConvDate
ConvTruncate
ConvBase64Encode
ConvBase64Decode
ConvEncoder
ConvDecoder
ConvISO8859_1
ConvWindows1252
ConvMacintosh
ConvCharmap
ConvLowercase
ConvUppercase
ConvTitlecase
ConvASCII
ConvURL
ConvMapper
)
type convertChoices struct{}
func (convertChoices) Choices() []string {
return []string{
ConvNone: "none",
ConvToNFC: "nfc",
ConvToNFD: "nfd",
ConvToNFKC: "nfkc",
ConvToNFKD: "nfkd",
ConvFindReplace: "replace",
ConvPrefix: "prefix",
ConvSuffix: "suffix",
ConvTrimPrefix: "trimprefix",
ConvTrimSuffix: "trimsuffix",
ConvIndex: "index",
ConvDate: "date",
ConvTruncate: "truncate",
ConvBase64Encode: "base64encode",
ConvBase64Decode: "base64decode",
ConvEncoder: "encoder",
ConvDecoder: "decoder",
ConvISO8859_1: "ISO-8859-1",
ConvWindows1252: "Windows-1252",
ConvMacintosh: "Macintosh",
ConvCharmap: "charmap",
ConvLowercase: "lowercase",
ConvUppercase: "uppercase",
ConvTitlecase: "titlecase",
ConvASCII: "ascii",
ConvURL: "url",
ConvMapper: "mapper",
}
}
func (convertChoices) Type() string {
return "string"
}
type cmapChoices struct{}
func (cmapChoices) Choices() []string {
choices := make([]string, 1)
i := 0
for _, enc := range charmap.All {
c, ok := enc.(*charmap.Charmap)
if !ok {
continue
}
name := strings.ReplaceAll(c.String(), " ", "-")
if name == "" {
name = fmt.Sprintf("unknown-%d", i)
}
Cmaps[i] = c
choices = append(choices, name)
i++
}
return choices
}
func (cmapChoices) Type() string {
return "string"
}
func charmapByID(cm fs.Enum[cmapChoices]) *charmap.Charmap {
c, ok := Cmaps[int(cm)]
if ok {
return c
}
return nil
flags.BoolVarP(cmdFlags, &deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move", "")
flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move", "")
}
var commandDefinition = &cobra.Command{
Use: "convmv source:path",
Short: `Convert file and directory names`,
// Warning! "|" will be replaced by backticks below
Use: "convmv dest:path --name-transform XXX",
Short: `Convert file and directory names in place.`,
// Warning¡ "¡" will be replaced by backticks below
Long: strings.ReplaceAll(`
This command renames files and directory names according to a user-supplied conversion.
convmv supports advanced path name transformations for converting and renaming files and directories by applying prefixes, suffixes, and other alterations.
It is useful for renaming a lot of files in an automated way.
`+transform.SprintList()+`
`+sprintList()+`
Multiple transformations can be used in sequence, applied in the order they are specified on the command line.
`, "|", "`"),
The ¡--name-transform¡ flag is also available in ¡sync¡, ¡copy¡, and ¡move¡.
## Files vs Directories ##
By default ¡--name-transform¡ will only apply to file names. This means only the leaf file name will be transformed.
However, some of the transforms would be better applied to the whole path or just to directories.
To choose which part of the file path is affected, some tags can be added to the ¡--name-transform¡
| Tag | Effect |
|------|------|
| ¡file¡ | Only transform the leaf name of files (DEFAULT) |
| ¡dir¡ | Only transform name of directories - these may appear anywhere in the path |
| ¡all¡ | Transform the entire path for files and directories |
This is used by adding the tag into the transform name like this: ¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.
For some conversions using all is more likely to be useful, for example ¡--name-transform all,nfc¡
Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name. This will cause an error.
## Ordering and Conflicts ##
* Transformations will be applied in the order specified by the user.
* If the ¡file¡ tag is in use (the default) then only the leaf name of files will be transformed.
* If the ¡dir¡ tag is in use then directories anywhere in the path will be transformed
* If the ¡all¡ tag is in use then directories and files anywhere in the path will be transformed
* Each transformation will be run one path segment at a time.
* If a transformation adds a ¡/¡ or ends up with an empty path segment then that will be an error.
* It is up to the user to put the transformations in a sensible order.
* Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or ¡nfc¡ followed by ¡nfd¡, are possible.
* Instead of enforcing mutual exclusivity, transformations are applied in sequence as specified by the
user, allowing for intentional use cases (e.g., trimming one prefix before adding another).
* Users should be aware that certain combinations may lead to unexpected results and should verify
transformations using ¡--dry-run¡ before execution.
## Race Conditions and Non-Deterministic Behavior ##
Some transformations, such as ¡replace=old:new¡, may introduce conflicts where multiple source files map to the same destination name.
This can lead to race conditions when performing concurrent transfers. It is up to the user to anticipate these.
* If two files from the source are transformed into the same name at the destination, the final state may be non-deterministic.
* Running rclone check after a sync using such transformations may erroneously report missing or differing files due to overwritten results.
* To minimize risks, users should:
* Carefully review transformations that may introduce conflicts.
* Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations).
* Avoid transformations that cause multiple distinct source files to map to the same destination name.
* Consider disabling concurrency with ¡--transfers=1¡ if necessary.
* Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every time they are used. Avoid these when using ¡bisync¡.
`, "¡", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.70",
"groups": "Filter,Listing,Important,Copy",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc, srcFileName := cmd.NewFsFile(args[0])
cmd.Run(false, true, command, func() error { // retries switched off to prevent double-encoding
return Convmv(context.Background(), fsrc, srcFileName)
fdst, srcFileName := cmd.NewFsFile(args[0])
cmd.Run(false, true, command, func() error {
if !transform.Transforming(context.Background()) {
return errors.New("--name-transform must be set")
}
if srcFileName == "" {
return sync.Transform(context.Background(), fdst, deleteEmptySrcDirs, createEmptySrcDirs)
}
return operations.TransformFile(context.Background(), fdst, srcFileName)
})
},
}
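An illustrative invocation of the reworked command, using one of the transform forms documented above (the remote path is a placeholder):
rclone convmv remote:path --name-transform "file,prefix=ABC" --dry-run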
// Convmv converts and renames files and directories
// pass srcFileName == "" to convmv every object in fsrc instead of a single object
func Convmv(ctx context.Context, f fs.Fs, srcFileName string) error {
Opt.ctx = ctx
Opt.f = f
if Opt.List {
printList()
return nil
}
err := Opt.validate()
if err != nil {
return err
}
if srcFileName == "" {
// it's a dir
return walkConv(ctx, f, "")
}
// it's a file
obj, err := f.NewObject(Opt.ctx, srcFileName)
if err != nil {
return err
}
oldName, newName, skip, err := parseEntry(obj)
if err != nil {
return err
}
if skip {
return nil
}
return operations.MoveFile(Opt.ctx, Opt.f, Opt.f, newName, oldName)
}
func (opt *ConvOpt) validate() error {
switch opt.ConvertAlgo {
case ConvNone:
return errors.New("must choose a conversion mode with -t flag")
case ConvFindReplace:
if len(opt.FindReplace) == 0 {
return errors.New("must include --replace flag in replace mode")
}
for _, set := range opt.FindReplace {
split := strings.Split(set, ",")
if len(split) != 2 {
return errors.New("--replace must include exactly two comma-separated values")
}
if split[0] == "" {
return errors.New("'find' value cannot be blank ('replace' can be)")
}
}
case ConvPrefix, ConvTrimPrefix:
if opt.Prefix == "" {
return errors.New("must include a --prefix")
}
case ConvSuffix, ConvTrimSuffix:
if opt.Suffix == "" {
return errors.New("must include a --suffix")
}
case ConvTruncate:
if opt.Max < 1 {
return errors.New("--max cannot be less than 1 in 'truncate' mode")
}
case ConvCharmap:
if opt.CmapFlag == 0 {
return errors.New("must specify a charmap with --charmap flag")
}
c := charmapByID(opt.CmapFlag)
if c == nil {
return errors.New("unknown charmap")
}
opt.Cmap = c
}
return nil
}
// keeps track of which dirs we've already renamed
func walkConv(ctx context.Context, f fs.Fs, dir string) error {
entries, err := list.DirSorted(ctx, f, false, dir)
if err != nil {
return err
}
return walkFunc(dir, entries, nil)
}
func walkFunc(path string, entries fs.DirEntries, err error) error {
fs.Debugf(path, "walking dir")
if err != nil {
return err
}
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:
oldName, newName, skip, err := parseEntry(x)
if err != nil {
return err
}
if skip {
continue
}
fs.Debugf(x, "%v %v %v %v %v", Opt.ctx, Opt.f, Opt.f, newName, oldName)
err = operations.MoveFile(Opt.ctx, Opt.f, Opt.f, newName, oldName)
if err != nil {
return err
}
case fs.Directory:
oldName, newName, skip, err := parseEntry(x)
if err != nil {
return err
}
if !skip { // still want to recurse during dry-runs to get accurate logs
err = DirMoveCaseInsensitive(Opt.ctx, Opt.f, oldName, newName)
if err != nil {
return err
}
} else {
newName = oldName // otherwise dry-runs won't be able to find it
}
// recurse, calling it by its new name
err = walkConv(Opt.ctx, Opt.f, newName)
if err != nil {
return err
}
}
}
return nil
}
// ConvertPath converts a path string according to the chosen ConvertAlgo.
// Each path segment is converted separately, to preserve path separators.
// If baseOnly is true, only the base will be converted (useful for renaming while walking a dir tree recursively.)
// for example, "some/nested/path" -> "some/nested/CONVERTEDPATH"
// otherwise, the entire path is converted.
func ConvertPath(s string, ConvertAlgo Convert, baseOnly bool) (string, error) {
if s == "" || s == "/" || s == "\\" || s == "." {
return "", nil
}
if baseOnly {
convertedBase, err := ConvertPathSegment(filepath.Base(s), ConvertAlgo)
return filepath.Join(filepath.Dir(s), convertedBase), err
}
segments := strings.Split(s, string(os.PathSeparator))
convertedSegments := make([]string, len(segments))
for _, seg := range segments {
convSeg, err := ConvertPathSegment(seg, ConvertAlgo)
if err != nil {
return "", err
}
convertedSegments = append(convertedSegments, convSeg)
}
return filepath.Join(convertedSegments...), nil
}
// ConvertPathSegment converts one path segment (or really any string) according to the chosen ConvertAlgo.
// It assumes path separators have already been trimmed.
func ConvertPathSegment(s string, ConvertAlgo Convert) (string, error) {
fs.Debugf(s, "converting")
switch ConvertAlgo {
case ConvNone:
return s, nil
case ConvToNFC:
return norm.NFC.String(s), nil
case ConvToNFD:
return norm.NFD.String(s), nil
case ConvToNFKC:
return norm.NFKC.String(s), nil
case ConvToNFKD:
return norm.NFKD.String(s), nil
case ConvBase64Encode:
return base64.URLEncoding.EncodeToString([]byte(s)), nil // URLEncoding to avoid slashes
case ConvBase64Decode:
if s == ".DS_Store" {
return s, nil
}
b, err := base64.URLEncoding.DecodeString(s)
return string(b), err
case ConvFindReplace:
oldNews := []string{}
for _, pair := range Opt.FindReplace {
split := strings.Split(pair, ",")
oldNews = append(oldNews, split...)
}
replacer := strings.NewReplacer(oldNews...)
return replacer.Replace(s), nil
case ConvPrefix:
return Opt.Prefix + s, nil
case ConvSuffix:
return s + Opt.Suffix, nil
case ConvTrimPrefix:
return strings.TrimPrefix(s, Opt.Prefix), nil
case ConvTrimSuffix:
return strings.TrimSuffix(s, Opt.Suffix), nil
case ConvTruncate:
if Opt.Max <= 0 {
return s, nil
}
if utf8.RuneCountInString(s) <= Opt.Max {
return s, nil
}
runes := []rune(s)
return string(runes[:Opt.Max]), nil
case ConvEncoder:
return Opt.Enc.Encode(s), nil
case ConvDecoder:
return Opt.Enc.Decode(s), nil
case ConvISO8859_1:
return encodeWithReplacement(s, charmap.ISO8859_1), nil
case ConvWindows1252:
return encodeWithReplacement(s, charmap.Windows1252), nil
case ConvMacintosh:
return encodeWithReplacement(s, charmap.Macintosh), nil
case ConvCharmap:
return encodeWithReplacement(s, Opt.Cmap), nil
case ConvLowercase:
return strings.ToLower(s), nil
case ConvUppercase:
return strings.ToUpper(s), nil
case ConvTitlecase:
return strings.ToTitle(s), nil
case ConvASCII:
return toASCII(s), nil
default:
return "", errors.New("this option is not yet implemented")
}
}
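// Illustrative examples (not part of the original source; they assume the
// corresponding Opt fields have been set):
//
//	ConvertPathSegment("hello", ConvBase64Encode)  // "aGVsbG8="
//	ConvertPathSegment("Foo.txt", ConvTrimSuffix)  // "Foo" when Opt.Suffix == ".txt"
//	ConvertPathSegment("Café", ConvToNFD)          // "Café" with a decomposed accent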
func parseEntry(e fs.DirEntry) (oldName, newName string, skip bool, err error) {
oldName = e.Remote()
newName, err = ConvertPath(oldName, Opt.ConvertAlgo, true)
if err != nil {
fs.Errorf(oldName, "error converting: %v", err)
return oldName, newName, true, err
}
if oldName == newName {
fs.Debugf(oldName, "name is already correct - skipping")
return oldName, newName, true, nil
}
skip = operations.SkipDestructive(Opt.ctx, oldName, "rename to "+newName)
return oldName, newName, skip, nil
}
// DirMoveCaseInsensitive does DirMove in two steps (to temp name, then real name)
// which is necessary for some case-insensitive backends
func DirMoveCaseInsensitive(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err error) {
tmpDstRemote := dstRemote + "-rclone-move-" + random.String(8)
err = operations.DirMove(ctx, f, srcRemote, tmpDstRemote)
if err != nil {
return err
}
return operations.DirMove(ctx, f, tmpDstRemote, dstRemote)
}
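// Illustrative sketch (not part of the original source): many case-insensitive
// backends reject a direct case-only rename such as "Foo" -> "foo", so the
// helper above routes it through a random temporary name, e.g.
//
//	err := DirMoveCaseInsensitive(ctx, f, "Foo", "foo")
//	// internally: "Foo" -> "foo-rclone-move-AbCd1234" -> "foo"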
func encodeWithReplacement(s string, cmap *charmap.Charmap) string {
return strings.Map(func(r rune) rune {
b, ok := cmap.EncodeRune(r)
if !ok {
return '_'
}
return cmap.DecodeByte(b)
}, s)
}
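// Illustrative example (not part of the original source): with
// charmap.ISO8859_1, encodeWithReplacement("Café 🦊.txt", ...) yields
// "Café _.txt" - runes the charmap cannot encode are replaced with '_'.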
func toASCII(s string) string {
return strings.Map(func(r rune) rune {
if r <= 127 {
return r
}
return -1
}, s)
}
func sprintList() string {
var out strings.Builder
_, _ = out.WriteString(`### Conversion modes
The conversion mode flag |-t| (or |--conv|) must be specified. This
defines the transformation that the |convmv| command will apply.
`)
for _, v := range Opt.ConvertAlgo.Choices() {
_, _ = fmt.Fprintf(&out, "- `%s`\n", v)
}
_, _ = out.WriteRune('\n')
_, _ = out.WriteString(`### Char maps
These are the choices for the |--charmap| flag.
`)
for _, v := range Opt.CmapFlag.Choices() {
_, _ = fmt.Fprintf(&out, "- `%s`\n", v)
}
_, _ = out.WriteRune('\n')
_, _ = out.WriteString(`### Encoding masks
These are the valid options for the |--encoding| flag.
`)
for _, v := range strings.Split(encoder.ValidStrings(), ", ") {
_, _ = fmt.Fprintf(&out, "- `%s`\n", v)
}
_, _ = out.WriteRune('\n')
sprintExamples(&out)
return out.String()
}
func printList() {
fmt.Println(sprintList())
}


@@ -1,87 +0,0 @@
package convmv
import (
"fmt"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/encoder"
)
type example struct {
Opt ConvOpt
Path string
}
var examples = []example{
{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvUppercase}},
{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvFindReplace, FindReplace: []string{"Fox,Turtle", "Quick,Slow"}}},
{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvBase64Encode}},
{Path: `c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0`, Opt: ConvOpt{ConvertAlgo: ConvBase64Decode}},
{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvToNFC}},
{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvToNFD}},
{Path: `stories/The Quick Brown 🦊 Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvASCII}},
{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvTrimSuffix, Suffix: ".txt"}},
{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvPrefix, Prefix: "OLD_"}},
{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvCharmap, CmapFlag: 20}},
{Path: `stories/The Quick Brown Fox: A Memoir [draft].txt`, Opt: ConvOpt{ConvertAlgo: ConvEncoder, Enc: encoder.EncodeColon | encoder.EncodeSquareBracket}},
{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvTruncate, Max: 21}},
}
func (e example) command() string {
s := fmt.Sprintf(`rclone convmv %q -t %s`, e.Path, e.Opt.ConvertAlgo)
switch e.Opt.ConvertAlgo {
case ConvFindReplace:
for _, r := range e.Opt.FindReplace {
s += fmt.Sprintf(` -r %q`, r)
}
case ConvTrimPrefix, ConvPrefix:
s += fmt.Sprintf(` --prefix %q`, e.Opt.Prefix)
case ConvTrimSuffix, ConvSuffix:
s += fmt.Sprintf(` --suffix %q`, e.Opt.Suffix)
case ConvCharmap:
s += fmt.Sprintf(` --charmap %q`, e.Opt.CmapFlag.String())
case ConvEncoder:
s += fmt.Sprintf(` --encoding %q`, e.Opt.Enc.String())
case ConvTruncate:
s += fmt.Sprintf(` --max %d`, e.Opt.Max)
}
return s
}
func (e example) output() string {
_ = e.Opt.validate()
Opt = e.Opt
s, err := ConvertPath(e.Path, e.Opt.ConvertAlgo, false)
if err != nil {
fs.Errorf(s, "error: %v", err)
}
return s
}
// go run ./ convmv --help
func sprintExamples(out *strings.Builder) {
_, _ = fmt.Fprintf(out, `### Examples:
Here are some examples of rclone convmv in action.
`)
for _, e := range examples {
_, _ = fmt.Fprintf(out, "```\n%s\n", e.command())
_, _ = fmt.Fprintf(out, "// Output: %s\n```\n\n", e.output())
}
Opt = ConvOpt{} // reset
}
/* func sprintAllCharmapExamples() string {
s := ""
e := example{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvCharmap, CmapFlag: 0}}
for i := range Cmaps {
e.Opt.CmapFlag++
_ = e.Opt.validate()
Opt = e.Opt
s += fmt.Sprintf("%d Command: %s \n", i, e.command())
s += fmt.Sprintf("Result: %s \n\n", e.output())
}
return s
} */


@@ -5,22 +5,22 @@ import (
"cmp"
"context"
"fmt"
"path/filepath"
"path"
"slices"
"strings"
"testing"
_ "github.com/rclone/rclone/backend/all" // import all backends
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/encoder"
"golang.org/x/text/unicode/norm"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/transform"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/text/unicode/norm"
)
// Some times used in the tests
@@ -34,62 +34,114 @@ func TestMain(m *testing.M) {
fstest.TestMain(m)
}
func TestConvmv(t *testing.T) {
func TestTransform(t *testing.T) {
type args struct {
ConvertAlgo fs.Enum[convertChoices]
ConvertBackAlgo fs.Enum[convertChoices]
Lossless bool // whether the ConvertBackAlgo is always losslessly invertible
ExtraOpt ConvOpt
TransformOpt []string
TransformBackOpt []string
Lossless bool // whether the TransformBackAlgo is always losslessly invertible
}
tests := []struct {
name string
args args
}{
{name: "NFC", args: args{ConvertAlgo: ConvToNFC, ConvertBackAlgo: ConvToNFD, Lossless: false}},
{name: "NFD", args: args{ConvertAlgo: ConvToNFD, ConvertBackAlgo: ConvToNFC, Lossless: false}},
{name: "NFKC", args: args{ConvertAlgo: ConvToNFKC, ConvertBackAlgo: ConvToNFKD, Lossless: false}},
{name: "NFKD", args: args{ConvertAlgo: ConvToNFKD, ConvertBackAlgo: ConvToNFKC, Lossless: false}},
{name: "base64", args: args{ConvertAlgo: ConvBase64Encode, ConvertBackAlgo: ConvBase64Decode, Lossless: true}},
{name: "replace", args: args{ConvertAlgo: ConvFindReplace, ConvertBackAlgo: ConvFindReplace, Lossless: true, ExtraOpt: ConvOpt{FindReplace: []string{"bread,banana", "pie,apple", "apple,pie", "banana,bread"}}}},
{name: "prefix", args: args{ConvertAlgo: ConvPrefix, ConvertBackAlgo: ConvTrimPrefix, Lossless: true, ExtraOpt: ConvOpt{Prefix: "PREFIX"}}},
{name: "suffix", args: args{ConvertAlgo: ConvSuffix, ConvertBackAlgo: ConvTrimSuffix, Lossless: true, ExtraOpt: ConvOpt{Suffix: "SUFFIX"}}},
{name: "truncate", args: args{ConvertAlgo: ConvTruncate, ConvertBackAlgo: ConvTruncate, Lossless: false, ExtraOpt: ConvOpt{Max: 10}}},
{name: "encoder", args: args{ConvertAlgo: ConvEncoder, ConvertBackAlgo: ConvDecoder, Lossless: true, ExtraOpt: ConvOpt{Enc: encoder.OS}}},
{name: "ISO-8859-1", args: args{ConvertAlgo: ConvISO8859_1, ConvertBackAlgo: ConvISO8859_1, Lossless: false}},
{name: "charmap", args: args{ConvertAlgo: ConvCharmap, ConvertBackAlgo: ConvCharmap, Lossless: false, ExtraOpt: ConvOpt{CmapFlag: 3}}},
{name: "lowercase", args: args{ConvertAlgo: ConvLowercase, ConvertBackAlgo: ConvUppercase, Lossless: false}},
{name: "ascii", args: args{ConvertAlgo: ConvASCII, ConvertBackAlgo: ConvASCII, Lossless: false}},
{name: "NFC", args: args{
TransformOpt: []string{"all,nfc"},
TransformBackOpt: []string{"all,nfd"},
Lossless: false,
}},
{name: "NFD", args: args{
TransformOpt: []string{"all,nfd"},
TransformBackOpt: []string{"all,nfc"},
Lossless: false,
}},
{name: "base64", args: args{
TransformOpt: []string{"all,base64encode"},
TransformBackOpt: []string{"all,base64encode"},
Lossless: false,
}},
{name: "prefix", args: args{
TransformOpt: []string{"all,prefix=PREFIX"},
TransformBackOpt: []string{"all,trimprefix=PREFIX"},
Lossless: true,
}},
{name: "suffix", args: args{
TransformOpt: []string{"all,suffix=SUFFIX"},
TransformBackOpt: []string{"all,trimsuffix=SUFFIX"},
Lossless: true,
}},
{name: "truncate", args: args{
TransformOpt: []string{"all,truncate=10"},
TransformBackOpt: []string{"all,truncate=10"},
Lossless: false,
}},
{name: "encoder", args: args{
TransformOpt: []string{"all,encoder=Colon,SquareBracket"},
TransformBackOpt: []string{"all,decoder=Colon,SquareBracket"},
Lossless: true,
}},
{name: "ISO-8859-1", args: args{
TransformOpt: []string{"all,ISO-8859-1"},
TransformBackOpt: []string{"all,ISO-8859-1"},
Lossless: false,
}},
{name: "charmap", args: args{
TransformOpt: []string{"all,charmap=ISO-8859-7"},
TransformBackOpt: []string{"all,charmap=ISO-8859-7"},
Lossless: false,
}},
{name: "lowercase", args: args{
TransformOpt: []string{"all,lowercase"},
TransformBackOpt: []string{"all,lowercase"},
Lossless: false,
}},
{name: "ascii", args: args{
TransformOpt: []string{"all,ascii"},
TransformBackOpt: []string{"all,ascii"},
Lossless: false,
}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
ctx := context.Background()
r.Mkdir(ctx, r.Flocal)
r.Mkdir(ctx, r.Fremote)
items := makeTestFiles(t, r, "dir1")
err := r.Fremote.Mkdir(ctx, "empty/empty")
require.NoError(t, err)
err = r.Flocal.Mkdir(ctx, "empty/empty")
require.NoError(t, err)
deleteDSStore(t, r)
r.CheckRemoteListing(t, items, nil)
r.CheckRemoteListing(t, items, []string{"dir1", "empty", "empty/empty"})
r.CheckLocalListing(t, items, []string{"dir1", "empty", "empty/empty"})
Opt = tt.args.ExtraOpt
Opt.ConvertAlgo = tt.args.ConvertAlgo
err := Convmv(context.Background(), r.Fremote, "")
assert.NoError(t, err)
compareNames(t, r, items)
err = transform.SetOptions(ctx, tt.args.TransformOpt...)
require.NoError(t, err)
convertedItems := convertItems(t, items)
Opt.ConvertAlgo = tt.args.ConvertBackAlgo
err = Convmv(context.Background(), r.Fremote, "")
err = sync.Transform(ctx, r.Fremote, true, true)
assert.NoError(t, err)
compareNames(t, r, convertedItems)
compareNames(ctx, t, r, items)
transformedItems := transformItems(ctx, t, items)
r.CheckRemoteListing(t, transformedItems, []string{transform.Path(ctx, "dir1", true), transform.Path(ctx, "empty", true), transform.Path(ctx, "empty/empty", true)})
err = transform.SetOptions(ctx, tt.args.TransformBackOpt...)
require.NoError(t, err)
err = sync.Transform(ctx, r.Fremote, true, true)
assert.NoError(t, err)
compareNames(ctx, t, r, transformedItems)
if tt.args.Lossless {
deleteDSStore(t, r)
r.CheckRemoteItems(t, items...)
r.CheckRemoteListing(t, items, []string{"dir1", "empty", "empty/empty"})
}
})
}
}
const alphabet = "ƀɀɠʀҠԀڀڠݠހ߀ကႠᄀᄠᅀᆀᇠሠበዠጠᐠᑀᑠᒀᒠᓀᓠᔀᔠᕀᕠᖀᖠᗀᗠᘀᘠᙀᚠᛀកᠠᡀᣀᦀ᧠ᨠᯀᰀᴀ⇠⋀⍀⍠⎀⎠⏀␀─┠╀╠▀■◀◠☀☠♀♠⚀⚠⛀⛠✀✠❀➀➠⠠⡀⡠⢀⢠⣀⣠⤀⤠⥀⥠⦠⨠⩀⪀⪠⫠⬀⬠⭀ⰀⲀⲠⳀⴀⵀ⺠⻀㇀㐀㐠㑀㑠㒀㒠㓀㓠㔀㔠㕀㕠㖀㖠㗀㗠㘀㘠㙀㙠㚀㚠㛀㛠㜀㜠㝀㝠㞀㞠㟀㟠㠀㠠㡀㡠㢀㢠㣀㣠㤀㤠㥀㥠㦀㦠㧀㧠㨀㨠㩀㩠㪀㪠㫀㫠㬀㬠㭀㭠㮀㮠㯀㯠㰀㰠㱀㱠㲀㲠㳀㳠㴀㴠㵀㵠㶀㶠㷀㷠㸀㸠㹀㹠㺀㺠㻀㻠㼀㼠㽀㽠㾀㾠㿀㿠䀀䀠䁀䁠䂀䂠䃀䃠䄀䄠䅀䅠䆀䆠䇀䇠䈀䈠䉀䉠䊀䊠䋀䋠䌀䌠䍀䍠䎀䎠䏀䏠䐀䐠䑀䑠䒀䒠䓀䓠䔀䔠䕀䕠䖀䖠䗀䗠䘀䘠䙀䙠䚀䚠䛀䛠䜀䜠䝀䝠䞀䞠䟀䟠䠀䠠䡀䡠䢀䢠䣀䣠䤀䤠䥀䥠䦀䦠䧀䧠䨀䨠䩀䩠䪀䪠䫀䫠䬀䬠䭀䭠䮀䮠䯀䯠䰀䰠䱀䱠䲀䲠䳀䳠䴀䴠䵀䵠䶀䷀䷠一丠乀习亀亠什仠伀传佀你侀侠俀俠倀倠偀偠傀傠僀僠儀儠兀兠冀冠净几刀删剀剠劀加勀勠匀匠區占厀厠叀叠吀吠呀呠咀咠哀哠唀唠啀啠喀喠嗀嗠嘀嘠噀噠嚀嚠囀因圀圠址坠垀垠埀埠堀堠塀塠墀墠壀壠夀夠奀奠妀妠姀姠娀娠婀婠媀媠嫀嫠嬀嬠孀孠宀宠寀寠尀尠局屠岀岠峀峠崀崠嵀嵠嶀嶠巀巠帀帠幀幠庀庠廀廠开张彀彠往徠忀忠怀怠恀恠悀悠惀惠愀愠慀慠憀憠懀懠戀戠所扠技抠拀拠挀挠捀捠掀掠揀揠搀搠摀摠撀撠擀擠攀攠敀敠斀斠旀无昀映晀晠暀暠曀曠最朠杀杠枀枠柀柠栀栠桀桠梀梠检棠椀椠楀楠榀榠槀槠樀樠橀橠檀檠櫀櫠欀欠歀歠殀殠毀毠氀氠汀池沀沠泀泠洀洠浀浠涀涠淀淠渀渠湀湠満溠滀滠漀漠潀潠澀澠激濠瀀瀠灀灠炀炠烀烠焀焠煀煠熀熠燀燠爀爠牀牠犀犠狀狠猀猠獀獠玀玠珀珠琀琠瑀瑠璀璠瓀瓠甀甠畀畠疀疠痀痠瘀瘠癀癠皀皠盀盠眀眠着睠瞀瞠矀矠砀砠础硠碀碠磀磠礀礠祀祠禀禠秀秠稀稠穀穠窀窠竀章笀笠筀筠简箠節篠簀簠籀籠粀粠糀糠紀素絀絠綀綠緀締縀縠繀繠纀纠绀绠缀缠罀罠羀羠翀翠耀耠聀聠肀肠胀胠脀脠腀腠膀膠臀臠舀舠艀艠芀芠苀苠茀茠荀荠莀莠菀菠萀萠葀葠蒀蒠蓀蓠蔀蔠蕀蕠薀薠藀藠蘀蘠虀虠蚀蚠蛀蛠蜀蜠蝀蝠螀螠蟀蟠蠀蠠血衠袀袠裀裠褀褠襀襠覀覠觀觠言訠詀詠誀誠諀諠謀謠譀譠讀讠诀诠谀谠豀豠貀負賀賠贀贠赀赠趀趠跀跠踀踠蹀蹠躀躠軀軠輀輠轀轠辀辠迀迠退造遀遠邀邠郀郠鄀鄠酀酠醀醠釀釠鈀鈠鉀鉠銀銠鋀鋠錀錠鍀鍠鎀鎠鏀鏠鐀鐠鑀鑠钀钠铀铠销锠镀镠門閠闀闠阀阠陀陠隀隠雀雠需霠靀靠鞀鞠韀韠頀頠顀顠颀颠飀飠餀餠饀饠馀馠駀駠騀騠驀驠骀骠髀髠鬀鬠魀魠鮀鮠鯀鯠鰀鰠鱀鱠鲀鲠鳀鳠鴀鴠鵀鵠鶀鶠鷀鷠鸀鸠鹀鹠麀麠黀黠鼀鼠齀齠龀龠ꀀꀠꁀꁠꂀꂠꃀꃠꄀꄠꅀꅠꆀꆠꇀꇠꈀꈠꉀꉠꊀꊠꋀꋠꌀꌠꍀꍠꎀꎠꏀꏠꐀꐠꑀꑠ꒠ꔀꔠꕀꕠꖀꖠꗀꗠꙀꚠꛀ꜀꜠ꝀꞀꡀ測試_Русский___ě_áñ"
// const alphabet = "ƀɀɠʀҠԀڀڠݠހ߀ကႠᄀᄠᅀᆀᇠሠበዠጠᐠᑀᑠᒀᒠᓀᓠᔀᔠᕀᕠᖀᖠᗀᗠᘀᘠᙀᚠᛀកᠠᡀᣀᦀ᧠ᨠᯀᰀᴀ⇠⋀⍀⍠⎀⎠⏀␀─┠╀╠▀■◀◠☀☠♀♠⚀⚠⛀⛠✀✠❀➀➠⠠⡀⡠⢀⢠⣀⣠⤀⤠⥀⥠⦠⨠⩀⪀⪠⫠⬀⬠⭀ⰀⲀⲠⳀⴀⵀ⺠⻀㇀㐀㐠㑀㑠㒀㒠㓀㓠㔀㔠㕀㕠㖀㖠㗀㗠㘀㘠㙀㙠㚀㚠㛀㛠㜀㜠㝀㝠㞀㞠㟀㟠㠀㠠㡀㡠㢀㢠㣀㣠㤀㤠㥀㥠㦀㦠㧀㧠㨀㨠㩀㩠㪀㪠㫀㫠㬀㬠㭀㭠㮀㮠㯀㯠㰀㰠㱀㱠㲀㲠㳀㳠㴀㴠㵀㵠㶀㶠㷀㷠㸀㸠㹀㹠㺀㺠㻀㻠㼀㼠㽀㽠㾀㾠㿀㿠䀀䀠䁀䁠䂀䂠䃀䃠䄀䄠䅀䅠䆀䆠䇀䇠䈀䈠䉀䉠䊀䊠䋀䋠䌀䌠䍀䍠䎀䎠䏀䏠䐀䐠䑀䑠䒀䒠䓀䓠䔀䔠䕀䕠䖀䖠䗀䗠䘀䘠䙀䙠䚀䚠䛀䛠䜀䜠䝀䝠䞀䞠䟀䟠䠀䠠䡀䡠䢀䢠䣀䣠䤀䤠䥀䥠䦀䦠䧀䧠䨀䨠䩀䩠䪀䪠䫀䫠䬀䬠䭀䭠䮀䮠䯀䯠䰀䰠䱀䱠䲀䲠䳀䳠䴀䴠䵀䵠䶀䷀䷠一丠乀习亀亠什仠伀传佀你侀侠俀俠倀倠偀偠傀傠僀僠儀儠兀兠冀冠净几刀删剀剠劀加勀勠匀匠區占厀厠叀叠吀吠呀呠咀咠哀哠唀唠啀啠喀喠嗀嗠嘀嘠噀噠嚀嚠囀因圀圠址坠垀垠埀埠堀堠塀塠墀墠壀壠夀夠奀奠妀妠姀姠娀娠婀婠媀媠嫀嫠嬀嬠孀孠宀宠寀寠尀尠局屠岀岠峀峠崀崠嵀嵠嶀嶠巀巠帀帠幀幠庀庠廀廠开张彀彠往徠忀忠怀怠恀恠悀悠惀惠愀愠慀慠憀憠懀懠戀戠所扠技抠拀拠挀挠捀捠掀掠揀揠搀搠摀摠撀撠擀擠攀攠敀敠斀斠旀无昀映晀晠暀暠曀曠最朠杀杠枀枠柀柠栀栠桀桠梀梠检棠椀椠楀楠榀榠槀槠樀樠橀橠檀檠櫀櫠欀欠歀歠殀殠毀毠氀氠汀池沀沠泀泠洀洠浀浠涀涠淀淠渀渠湀湠満溠滀滠漀漠潀潠澀澠激濠瀀瀠灀灠炀炠烀烠焀焠煀煠熀熠燀燠爀爠牀牠犀犠狀狠猀猠獀獠玀玠珀珠琀琠瑀瑠璀璠瓀瓠甀甠畀畠疀疠痀痠瘀瘠癀癠皀皠盀盠眀眠着睠瞀瞠矀矠砀砠础硠碀碠磀磠礀礠祀祠禀禠秀秠稀稠穀穠窀窠竀章笀笠筀筠简箠節篠簀簠籀籠粀粠糀糠紀素絀絠綀綠緀締縀縠繀繠纀纠绀绠缀缠罀罠羀羠翀翠耀耠聀聠肀肠胀胠脀脠腀腠膀膠臀臠舀舠艀艠芀芠苀苠茀茠荀荠莀莠菀菠萀萠葀葠蒀蒠蓀蓠蔀蔠蕀蕠薀薠藀藠蘀蘠虀虠蚀蚠蛀蛠蜀蜠蝀蝠螀螠蟀蟠蠀蠠血衠袀袠裀裠褀褠襀襠覀覠觀觠言訠詀詠誀誠諀諠謀謠譀譠讀讠诀诠谀谠豀豠貀負賀賠贀贠赀赠趀趠跀跠踀踠蹀蹠躀躠軀軠輀輠轀轠辀辠迀迠退造遀遠邀邠郀郠鄀鄠酀酠醀醠釀釠鈀鈠鉀鉠銀銠鋀鋠錀錠鍀鍠鎀鎠鏀鏠鐀鐠鑀鑠钀钠铀铠销锠镀镠門閠闀闠阀阠陀陠隀隠雀雠需霠靀靠鞀鞠韀韠頀頠顀顠颀颠飀飠餀餠饀饠馀馠駀駠騀騠驀驠骀骠髀髠鬀鬠魀魠鮀鮠鯀鯠鰀鰠鱀鱠鲀鲠鳀鳠鴀鴠鵀鵠鶀鶠鷀鷠鸀鸠鹀鹠麀麠黀黠鼀鼠齀齠龀龠ꀀꀠꁀꁠꂀꂠꃀꃠꄀꄠꅀꅠꆀꆠꇀꇠꈀꈠꉀꉠꊀꊠꋀꋠꌀꌠꍀꍠꎀꎠꏀꏠꐀꐠꑀꑠ꒠ꔀꔠꕀꕠꖀꖠꗀꗠꙀꚠꛀ꜀꜠ꝀꞀꡀ測試_Русский___ě_áñ"
const alphabet = "abcdefg123456789"
var extras = []string{"apple", "banana", "appleappleapplebanana", "splitbananasplit"}
@@ -100,17 +152,19 @@ func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {
items := []fstest.Item{}
for _, c := range alphabet {
var out strings.Builder
for i := rune(0); i < 32; i++ {
for i := rune(0); i < 7; i++ {
out.WriteRune(c + i)
}
fileName := filepath.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))
fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))
fileName = strings.ToValidUTF8(fileName, "")
fileName = strings.NewReplacer(":", "", "<", "", ">", "", "?", "").Replace(fileName) // remove characters illegal on windows
if debug != "" {
fileName = debug
}
item := r.WriteObject(context.Background(), fileName, fileName, t1)
r.WriteFile(fileName, fileName, t1)
items = append(items, item)
n++
@@ -121,6 +175,7 @@ func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {
for _, extra := range extras {
item := r.WriteObject(context.Background(), extra, extra, t1)
r.WriteFile(extra, extra, t1)
items = append(items, item)
}
@@ -137,7 +192,7 @@ func deleteDSStore(t *testing.T, r *fstest.Run) {
assert.NoError(t, err)
}
func compareNames(t *testing.T, r *fstest.Run, items []fstest.Item) {
func compareNames(ctx context.Context, t *testing.T, r *fstest.Run, items []fstest.Item) {
var entries fs.DirEntries
deleteDSStore(t, r)
@@ -158,10 +213,8 @@ func compareNames(t *testing.T, r *fstest.Run, items []fstest.Item) {
// sort by CONVERTED name
slices.SortStableFunc(items, func(a, b fstest.Item) int {
aConv, err := ConvertPath(a.Path, Opt.ConvertAlgo, false)
require.NoError(t, err, a.Path)
bConv, err := ConvertPath(b.Path, Opt.ConvertAlgo, false)
require.NoError(t, err, b.Path)
aConv := transform.Path(ctx, a.Path, false)
bConv := transform.Path(ctx, b.Path, false)
return cmp.Compare(aConv, bConv)
})
slices.SortStableFunc(entries, func(a, b fs.DirEntry) int {
@@ -169,23 +222,21 @@ func compareNames(t *testing.T, r *fstest.Run, items []fstest.Item) {
})
for i, e := range entries {
expect, err := ConvertPath(items[i].Path, Opt.ConvertAlgo, false)
assert.NoError(t, err)
expect := transform.Path(ctx, items[i].Path, false)
msg := fmt.Sprintf("expected %v, got %v", detectEncoding(expect), detectEncoding(e.Remote()))
assert.Equal(t, expect, e.Remote(), msg)
}
}
func convertItems(t *testing.T, items []fstest.Item) []fstest.Item {
convertedItems := []fstest.Item{}
func transformItems(ctx context.Context, t *testing.T, items []fstest.Item) []fstest.Item {
transformedItems := []fstest.Item{}
for _, item := range items {
newPath, err := ConvertPath(item.Path, Opt.ConvertAlgo, false)
assert.NoError(t, err)
newPath := transform.Path(ctx, item.Path, false)
newItem := item
newItem.Path = newPath
convertedItems = append(convertedItems, newItem)
transformedItems = append(transformedItems, newItem)
}
return convertedItems
return transformedItems
}
func detectEncoding(s string) string {


@@ -43,7 +43,7 @@ Setting |--auto-filename| will attempt to automatically determine the
filename from the URL (after any redirections) and used in the
destination path.
With |--auto-filename-header| in addition, if a specific filename is
With |--header-filename| in addition, if a specific filename is
set in HTTP headers, it will be used instead of the name from the URL.
With |--print-filename| in addition, the resulting file name will be
printed.
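For example, a command along these lines (illustrative only, using a made-up
URL and remote):

    rclone copyurl --auto-filename --header-filename --print-filename https://example.com/download remote:backups

would use a filename set in the HTTP headers when the server provides one, fall
back to the name derived from the URL otherwise, and print the name it chose.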

cmd/gitannex/configparse.go (new file, 131 lines)

@@ -0,0 +1,131 @@
package gitannex
import (
"fmt"
"slices"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/fspath"
)
type configID int
const (
configRemoteName configID = iota
configPrefix
configLayout
)
// configDefinition describes a configuration value required by this command. We
// use "GETCONFIG" messages to query git-annex for these values at runtime.
type configDefinition struct {
id configID
names []string
description string
defaultValue string
}
const (
defaultRclonePrefix = "git-annex-rclone"
defaultRcloneLayout = "nodir"
)
var requiredConfigs = []configDefinition{
{
id: configRemoteName,
names: []string{"rcloneremotename", "target"},
description: "Name of the rclone remote to use. " +
"Must match a remote known to rclone. " +
"(Note that rclone remotes are a distinct concept from git-annex remotes.)",
},
{
id: configPrefix,
names: []string{"rcloneprefix", "prefix"},
description: "Directory where rclone will write git-annex content. " +
fmt.Sprintf("If not specified, defaults to %q. ", defaultRclonePrefix) +
"This directory will be created on init if it does not exist.",
defaultValue: defaultRclonePrefix,
},
{
id: configLayout,
names: []string{"rclonelayout", "rclone_layout"},
description: "Defines where, within the rcloneprefix directory, rclone will write git-annex content. " +
fmt.Sprintf("Must be one of %v. ", allLayoutModes()) +
fmt.Sprintf("If empty, defaults to %q.", defaultRcloneLayout),
defaultValue: defaultRcloneLayout,
},
}
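// Purely illustrative (not part of the original source): when git-annex uses
// rclone as an external special remote, these settings are passed as
// "key=value" pairs to initremote, along the lines of
//
//	git annex initremote myremote type=external externaltype=<rclone helper> \
//	    rcloneremotename=mydrive rcloneprefix=git-annex-rclone rclonelayout=nodir
//
// where "mydrive" is a hypothetical rclone remote and the externaltype value
// depends on how the rclone git-annex helper binary is installed.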
func (c *configDefinition) getCanonicalName() string {
if len(c.names) < 1 {
panic(fmt.Errorf("configDefinition must have at least one name: %v", c))
}
return c.names[0]
}
// fullDescription returns a single-line, human-readable description for this
// config. The returned string begins with a list of synonyms and ends with
// `c.description`.
func (c *configDefinition) fullDescription() string {
if len(c.names) <= 1 {
return c.description
}
// Exclude the canonical name from the list of synonyms.
synonyms := c.names[1:len(c.names)]
commaSeparatedSynonyms := strings.Join(synonyms, ", ")
return fmt.Sprintf("(synonyms: %s) %s", commaSeparatedSynonyms, c.description)
}
// validateRemoteName validates the "rcloneremotename" config that we receive
// from git-annex. It returns nil iff `value` is valid. Otherwise, it returns a
// descriptive error suitable for sending back to git-annex via stdout.
//
// The value is only valid when:
// 1. It is the exact name of an existing remote.
// 2. It is an fspath string that names an existing remote or a backend. The
// string may include options, but it must not include a path. (That's what
// the "rcloneprefix" config is for.)
//
// While backends are not remote names, per se, they are permitted for
// compatibility with [fstest]. We could guard this behavior behind
// [testing.Testing] to prevent users from specifying backend strings, but
// there's no obvious harm in permitting it.
func validateRemoteName(value string) error {
remoteNames := config.GetRemoteNames()
// Check whether `value` is an exact match for an existing remote.
//
// If we checked whether [cache.Get] returns [fs.ErrorNotFoundInConfigFile],
// we would incorrectly identify file names as valid remote names. We also
// avoid [config.FileSections] because it will miss remotes that are defined
// by environment variables.
if slices.Contains(remoteNames, value) {
return nil
}
parsed, err := fspath.Parse(value)
if err != nil {
return fmt.Errorf("remote could not be parsed: %s", value)
}
if parsed.Path != "" {
return fmt.Errorf("remote does not exist or incorrectly contains a path: %s", value)
}
// Now that we've established `value` is an fspath string that does not
// include a path component, we only need to check whether it names an
// existing remote or backend.
if slices.Contains(remoteNames, parsed.Name) {
return nil
}
maybeBackend := strings.HasPrefix(value, ":")
if !maybeBackend {
return fmt.Errorf("remote does not exist: %s", value)
}
// Strip the leading colon before searching for the backend. For instance,
// search for "local" instead of ":local". Note that `parsed.Name` already
// omits any config options baked into the string.
trimmedBackendName := strings.TrimPrefix(parsed.Name, ":")
if _, err = fs.Find(trimmedBackendName); err != nil {
return fmt.Errorf("backend does not exist: %s", trimmedBackendName)
}
return nil
}
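// Illustrative behaviour of the rules above (assumes a remote named "mydrive"
// exists in the rclone config; not part of the original source):
//
//	validateRemoteName("mydrive")         // nil - exact remote name
//	validateRemoteName("mydrive:")        // nil - fspath naming an existing remote
//	validateRemoteName("mydrive:photos")  // error - must not contain a path
//	validateRemoteName(":local:")         // nil - backend string
//	validateRemoteName(":nosuchbackend:") // error - backend does not exist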


@@ -28,14 +28,11 @@ import (
"io"
"os"
"path/filepath"
"slices"
"strings"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -110,35 +107,6 @@ func (m *messageParser) finalParameter() string {
return param
}
// configDefinition describes a configuration value required by this command. We
// use "GETCONFIG" messages to query git-annex for these values at runtime.
type configDefinition struct {
names []string
description string
destination *string
defaultValue *string
}
func (c *configDefinition) getCanonicalName() string {
if len(c.names) < 1 {
panic(fmt.Errorf("configDefinition must have at least one name: %v", c))
}
return c.names[0]
}
// fullDescription returns a single-line, human-readable description for this
// config. The returned string begins with a list of synonyms and ends with
// `c.description`.
func (c *configDefinition) fullDescription() string {
if len(c.names) <= 1 {
return c.description
}
// Exclude the canonical name from the list of synonyms.
synonyms := c.names[1:len(c.names)]
commaSeparatedSynonyms := strings.Join(synonyms, ", ")
return fmt.Sprintf("(synonyms: %s) %s", commaSeparatedSynonyms, c.description)
}
// server contains this command's current state.
type server struct {
reader *bufio.Reader
@@ -274,81 +242,31 @@ func (s *server) handleInitRemote() error {
return fmt.Errorf("failed to get configs: %w", err)
}
// Explicitly check whether [server.configRcloneRemoteName] names a remote.
//
// - We do not permit file paths in the remote name; that's what
// [s.configPrefix] is for. If we simply checked whether [cache.Get]
// returns [fs.ErrorNotFoundInConfigFile], we would incorrectly identify
// file names as valid remote names.
//
// - In order to support remotes defined by environment variables, we must
// use [config.GetRemoteNames] instead of [config.FileSections].
trimmedName := strings.TrimSuffix(s.configRcloneRemoteName, ":")
if slices.Contains(config.GetRemoteNames(), trimmedName) {
s.sendMsg("INITREMOTE-SUCCESS")
return nil
if err := validateRemoteName(s.configRcloneRemoteName); err != nil {
s.sendMsg(fmt.Sprintf("INITREMOTE-FAILURE %s", err))
return fmt.Errorf("failed to init remote: %w", err)
}
// Otherwise, check whether [server.configRcloneRemoteName] is actually a
// backend string such as ":local:". These are not remote names, per se, but
// they are permitted for compatibility with [fstest]. We could guard this
// behavior behind [testing.Testing] to prevent users from specifying
// backend strings, but there's no obvious harm in permitting it.
maybeBackend := strings.HasPrefix(s.configRcloneRemoteName, ":")
if !maybeBackend {
s.sendMsg("INITREMOTE-FAILURE remote does not exist: " + s.configRcloneRemoteName)
return fmt.Errorf("remote does not exist: %s", s.configRcloneRemoteName)
}
parsed, err := fspath.Parse(s.configRcloneRemoteName)
if err != nil {
s.sendMsg("INITREMOTE-FAILURE remote could not be parsed as a backend: " + s.configRcloneRemoteName)
return fmt.Errorf("remote could not be parsed as a backend: %s", s.configRcloneRemoteName)
}
if parsed.Path != "" {
s.sendMsg("INITREMOTE-FAILURE backend must not have a path: " + s.configRcloneRemoteName)
return fmt.Errorf("backend must not have a path: %s", s.configRcloneRemoteName)
}
// Strip the leading colon and options before searching for the backend,
// i.e. search for "local" instead of ":local,description=hello:/tmp/foo".
trimmedBackendName := strings.TrimPrefix(parsed.Name, ":")
if _, err = fs.Find(trimmedBackendName); err != nil {
s.sendMsg("INITREMOTE-FAILURE backend does not exist: " + trimmedBackendName)
return fmt.Errorf("backend does not exist: %s", trimmedBackendName)
if mode := parseLayoutMode(s.configRcloneLayout); mode == layoutModeUnknown {
err := fmt.Errorf("unknown layout mode: %s", s.configRcloneLayout)
s.sendMsg(fmt.Sprintf("INITREMOTE-FAILURE %s", err))
return fmt.Errorf("failed to init remote: %w", err)
}
s.sendMsg("INITREMOTE-SUCCESS")
return nil
}
// Get a list of configs with pointers to fields of `s`.
func (s *server) getRequiredConfigs() []configDefinition {
defaultRclonePrefix := "git-annex-rclone"
defaultRcloneLayout := "nodir"
return []configDefinition{
{
[]string{"rcloneremotename", "target"},
"Name of the rclone remote to use. " +
"Must match a remote known to rclone. " +
"(Note that rclone remotes are a distinct concept from git-annex remotes.)",
&s.configRcloneRemoteName,
nil,
},
{
[]string{"rcloneprefix", "prefix"},
"Directory where rclone will write git-annex content. " +
fmt.Sprintf("If not specified, defaults to %q. ", defaultRclonePrefix) +
"This directory will be created on init if it does not exist.",
&s.configPrefix,
&defaultRclonePrefix,
},
{
[]string{"rclonelayout", "rclone_layout"},
"Defines where, within the rcloneprefix directory, rclone will write git-annex content. " +
fmt.Sprintf("Must be one of %v. ", allLayoutModes()) +
fmt.Sprintf("If empty, defaults to %q.", defaultRcloneLayout),
&s.configRcloneLayout,
&defaultRcloneLayout,
},
func (s *server) mustSetConfigValue(id configID, value string) {
switch id {
case configRemoteName:
s.configRcloneRemoteName = value
case configPrefix:
s.configPrefix = value
case configLayout:
s.configRcloneLayout = value
default:
panic(fmt.Errorf("unhandled configId: %v", id))
}
}
@@ -360,8 +278,8 @@ func (s *server) queryConfigs() error {
// Send a "GETCONFIG" message for each required config and parse git-annex's
// "VALUE" response.
for _, config := range s.getRequiredConfigs() {
var valueReceived bool
queryNextConfig:
for _, config := range requiredConfigs {
// Try each of the config's names in sequence, starting with the
// canonical name.
for _, configName := range config.names {
@@ -377,19 +295,15 @@ func (s *server) queryConfigs() error {
return fmt.Errorf("failed to parse config value: %s %s", valueKeyword, message.line)
}
value := message.finalParameter()
if value != "" {
*config.destination = value
valueReceived = true
break
if value := message.finalParameter(); value != "" {
s.mustSetConfigValue(config.id, value)
continue queryNextConfig
}
}
if !valueReceived {
if config.defaultValue == nil {
return fmt.Errorf("did not receive a non-empty config value for %q", config.getCanonicalName())
}
*config.destination = *config.defaultValue
if config.defaultValue == "" {
return fmt.Errorf("did not receive a non-empty config value for %q", config.getCanonicalName())
}
s.mustSetConfigValue(config.id, config.defaultValue)
}
s.configsDone = true
@@ -408,7 +322,7 @@ func (s *server) handlePrepare() error {
// Git-annex is asking us to return the list of settings that we use. Keep this
// in sync with `handlePrepare()`.
func (s *server) handleListConfigs() {
for _, config := range s.getRequiredConfigs() {
for _, config := range requiredConfigs {
s.sendMsg(fmt.Sprintf("CONFIG %s %s", config.getCanonicalName(), config.fullDescription()))
}
s.sendMsg("CONFIGEND")


@@ -10,7 +10,6 @@ import (
"regexp"
"runtime"
"strings"
"sync"
"testing"
"time"
@@ -191,14 +190,10 @@ func TestMessageParser(t *testing.T) {
}
func TestConfigDefinitionOneName(t *testing.T) {
var parsed string
var defaultValue = "abc"
configFoo := configDefinition{
names: []string{"foo"},
description: "The foo config is utterly useless.",
destination: &parsed,
defaultValue: &defaultValue,
defaultValue: "abc",
}
assert.Equal(t, "foo",
@@ -210,14 +205,10 @@ func TestConfigDefinitionOneName(t *testing.T) {
}
func TestConfigDefinitionTwoNames(t *testing.T) {
var parsed string
var defaultValue = "abc"
configFoo := configDefinition{
names: []string{"foo", "bar"},
description: "The foo config is utterly useless.",
destination: &parsed,
defaultValue: &defaultValue,
defaultValue: "abc",
}
assert.Equal(t, "foo",
@@ -229,14 +220,10 @@ func TestConfigDefinitionTwoNames(t *testing.T) {
}
func TestConfigDefinitionThreeNames(t *testing.T) {
var parsed string
var defaultValue = "abc"
configFoo := configDefinition{
names: []string{"foo", "bar", "baz"},
description: "The foo config is utterly useless.",
destination: &parsed,
defaultValue: &defaultValue,
defaultValue: "abc",
}
assert.Equal(t, "foo",
@@ -252,6 +239,9 @@ type testState struct {
server *server
mockStdinW *io.PipeWriter
mockStdoutReader *bufio.Reader
// readLineTimeout is the maximum duration to wait for [server] to write a
// line to the mock stdout.
readLineTimeout time.Duration
fstestRun *fstest.Run
remoteName string
@@ -270,6 +260,11 @@ func makeTestState(t *testing.T) testState {
},
mockStdinW: stdinW,
mockStdoutReader: bufio.NewReader(stdoutR),
// The default readLineTimeout must be large enough to accommodate slow
// operations on real remotes. Without a timeout, attempts to read a
// line that's never written would block indefinitely.
readLineTimeout: time.Second * 30,
}
}
@@ -277,18 +272,52 @@ func (h *testState) requireRemoteIsEmpty() {
h.fstestRun.CheckRemoteItems(h.t)
}
func (h *testState) requireReadLineExact(line string) {
receivedLine, err := h.mockStdoutReader.ReadString('\n')
require.NoError(h.t, err)
require.Equal(h.t, line+"\n", receivedLine)
// readLineWithTimeout attempts to read a line from the mock stdout. Returns an
// error if the read operation times out or fails for any reason.
func (h *testState) readLineWithTimeout() (string, error) {
ctx, cancel := context.WithTimeout(context.Background(), h.readLineTimeout)
defer cancel()
lineChan := make(chan string)
errChan := make(chan error)
go func() {
line, err := h.mockStdoutReader.ReadString('\n')
if err != nil {
errChan <- err
} else {
lineChan <- line
}
}()
select {
case line := <-lineChan:
return line, nil
case err := <-errChan:
return "", err
case <-ctx.Done():
return "", fmt.Errorf("attempt to read line timed out: %w", ctx.Err())
}
}
// requireReadLineExact requires that a line matching wantLine can be read from
// the mock stdout.
func (h *testState) requireReadLineExact(wantLine string) {
receivedLine, err := h.readLineWithTimeout()
require.NoError(h.t, err)
require.Equal(h.t, wantLine+"\n", receivedLine)
}
// requireReadLine requires that a line can be read from the mock stdout and
// returns the line.
func (h *testState) requireReadLine() string {
receivedLine, err := h.mockStdoutReader.ReadString('\n')
receivedLine, err := h.readLineWithTimeout()
require.NoError(h.t, err)
return receivedLine
}
// requireWriteLine requires that the given line is successfully written to the
// mock stdin.
func (h *testState) requireWriteLine(line string) {
_, err := h.mockStdinW.Write([]byte(line + "\n"))
require.NoError(h.t, err)
@@ -462,7 +491,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remoteName)
@@ -472,6 +501,35 @@ var fstestTestCases = []testCase{
require.NoError(t, h.mockStdinW.Close())
},
},
{
label: "HandlesPrepareWithUnknownLayout",
testProtocolFunc: func(t *testing.T, h *testState) {
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension
h.requireReadLineExact("EXTENSIONS")
require.True(t, h.server.extensionInfo)
h.requireWriteLine("PREPARE")
h.requireReadLineExact("GETCONFIG rcloneremotename")
h.requireWriteLine("VALUE " + h.remoteName)
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE nonexistentLayoutMode")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remoteName)
require.Equal(t, h.server.configPrefix, h.remotePrefix)
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-FAILURE unknown layout mode: nonexistentLayoutMode")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "unknown layout mode: nonexistentLayoutMode",
},
{
label: "HandlesPrepareWithNonexistentRemote",
testProtocolFunc: func(t *testing.T, h *testState) {
@@ -487,7 +545,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, "thisRemoteDoesNotExist")
@@ -495,11 +553,11 @@ var fstestTestCases = []testCase{
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist: thisRemoteDoesNotExist")
h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: thisRemoteDoesNotExist")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "remote does not exist: thisRemoteDoesNotExist",
expectedError: "remote does not exist or incorrectly contains a path: thisRemoteDoesNotExist",
},
{
label: "HandlesPrepareWithPathAsRemote",
@@ -516,7 +574,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remotePrefix)
@@ -526,13 +584,13 @@ var fstestTestCases = []testCase{
h.requireWriteLine("INITREMOTE")
require.Regexp(t,
regexp.MustCompile("^INITREMOTE-FAILURE remote does not exist: "),
regexp.MustCompile("^INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: "),
h.requireReadLine(),
)
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "remote does not exist:",
expectedError: "remote does not exist or incorrectly contains a path:",
},
{
label: "HandlesPrepareWithNonexistentBackendAsRemote",
@@ -544,7 +602,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":nonexistentBackend:", h.server.configRcloneRemoteName)
@@ -568,7 +626,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":local:", h.server.configRcloneRemoteName)
@@ -591,7 +649,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":local", h.server.configRcloneRemoteName)
@@ -599,11 +657,11 @@ var fstestTestCases = []testCase{
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-FAILURE remote could not be parsed as a backend: :local")
h.requireReadLineExact("INITREMOTE-FAILURE remote could not be parsed: :local")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "remote could not be parsed as a backend:",
expectedError: "remote could not be parsed:",
},
{
label: "HandlesPrepareWithBackendContainingOptionsAsRemote",
@@ -615,7 +673,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":local,description=banana:", h.server.configRcloneRemoteName)
@@ -638,7 +696,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":local,description=banana:/bad/path", h.server.configRcloneRemoteName)
@@ -646,14 +704,38 @@ var fstestTestCases = []testCase{
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
require.Regexp(t,
regexp.MustCompile("^INITREMOTE-FAILURE backend must not have a path: "),
h.requireReadLine(),
)
h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: :local,description=banana:/bad/path")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "remote does not exist or incorrectly contains a path:",
},
{
label: "HandlesPrepareWithRemoteContainingOptions",
testProtocolFunc: func(t *testing.T, h *testState) {
const envVar = "RCLONE_CONFIG_fake_remote_TYPE"
require.NoError(t, os.Setenv(envVar, "memory"))
t.Cleanup(func() { require.NoError(t, os.Unsetenv(envVar)) })
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("PREPARE")
h.requireReadLineExact("GETCONFIG rcloneremotename")
h.requireWriteLine("VALUE fake_remote,banana=yes:")
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, "fake_remote,banana=yes:", h.server.configRcloneRemoteName)
require.Equal(t, "/foo", h.server.configPrefix)
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-SUCCESS")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "backend must not have a path:",
},
{
label: "HandlesPrepareWithSynonyms",
@@ -674,7 +756,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remoteName)
@@ -1281,6 +1363,46 @@ var fstestTestCases = []testCase{
},
}
// TestReadLineHasShortDeadline verifies that [testState.readLineWithTimeout]
// does not block indefinitely when a line is never written.
func TestReadLineHasShortDeadline(t *testing.T) {
const timeoutForRead = time.Millisecond * 50
const timeoutForTest = time.Millisecond * 100
const tickDuration = time.Millisecond * 10
type readLineResult struct {
line string
err error
}
resultChan := make(chan readLineResult)
go func() {
defer close(resultChan)
h := makeTestState(t)
h.readLineTimeout = timeoutForRead
line, err := h.readLineWithTimeout()
resultChan <- readLineResult{line, err}
}()
// This closure will be run periodically until time runs out or until all of
// its assertions pass.
idempotentConditionFunc := func(c *assert.CollectT) {
result, ok := <-resultChan
require.True(c, ok, "The goroutine should send a result")
require.Empty(c, result.line, "No line should be read")
require.ErrorIs(c, result.err, context.DeadlineExceeded)
_, ok = <-resultChan
require.False(c, ok, "The channel should be closed")
}
require.EventuallyWithT(t, idempotentConditionFunc, timeoutForTest, tickDuration)
}
// TestMain drives the tests
func TestMain(m *testing.M) {
fstest.TestMain(m)
@@ -1311,23 +1433,27 @@ func TestGitAnnexFstestBackendCases(t *testing.T) {
handle.remoteName = remoteName
handle.remotePrefix = remotePath
var wg sync.WaitGroup
wg.Add(1)
serverErrorChan := make(chan error)
go func() {
err := handle.server.run()
if testCase.expectedError == "" {
require.NoError(t, err)
} else {
require.ErrorContains(t, err, testCase.expectedError)
}
wg.Done()
// Run the gitannex server and send the result back to the
// goroutine associated with `t`. We can't use `require` here
// because it could call `t.FailNow()`, whose documentation says it must
// be called from the goroutine running the test.
serverErrorChan <- handle.server.run()
}()
defer wg.Wait()
testCase.testProtocolFunc(t, &handle)
serverError, ok := <-serverErrorChan
require.True(t, ok, "Should receive one error/nil from server")
require.Empty(t, serverErrorChan)
if testCase.expectedError == "" {
require.NoError(t, serverError)
} else {
require.ErrorContains(t, serverError, testCase.expectedError)
}
})
}
}


@@ -191,7 +191,6 @@ func setupRootCommand(rootCmd *cobra.Command) {
})
cobra.OnInitialize(initConfig)
}
// Traverse the tree of commands running fn on each


@@ -6,6 +6,8 @@ package ncdu
import (
"context"
"fmt"
"log/slog"
"os"
"path"
"reflect"
"sort"
@@ -925,23 +927,19 @@ func (u *UI) Run() error {
return fmt.Errorf("screen init: %w", err)
}
// Hijack fs.LogOutput so that it doesn't corrupt the screen.
if logOutput := fs.LogOutput; !log.Redirected() {
type log struct {
text string
level fs.LogLevel
}
var logs []log
fs.LogOutput = func(level fs.LogLevel, text string) {
// Hijack log output so that it doesn't corrupt the screen.
if !log.Redirected() {
var logs []string
log.Handler.SetOutput(func(level slog.Level, text string) {
if len(logs) > 100 {
logs = logs[len(logs)-100:]
}
logs = append(logs, log{level: level, text: text})
}
logs = append(logs, text)
})
defer func() {
fs.LogOutput = logOutput
for i := range logs {
logOutput(logs[i].level, logs[i].text)
log.Handler.ResetOutput()
for _, text := range logs {
_, _ = os.Stderr.WriteString(text)
}
}()
}


@@ -11,6 +11,8 @@ import (
"testing"
"github.com/rclone/rclone/cmd/serve/nfs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfstest"
"github.com/stretchr/testify/require"
@@ -38,7 +40,7 @@ func TestMount(t *testing.T) {
nfs.Opt.HandleCacheDir = t.TempDir()
require.NoError(t, nfs.Opt.HandleCache.Set(cacheType))
// Check we can create a handler
_, err := nfs.NewHandler(context.Background(), nil, &nfs.Opt)
_, err := nfs.NewHandler(context.Background(), vfs.New(object.MemoryFs, nil), &nfs.Opt)
if errors.Is(err, nfs.ErrorSymlinkCacheNotSupported) || errors.Is(err, nfs.ErrorSymlinkCacheNoPermission) {
t.Skip(err.Error() + ": run with: go test -c && sudo setcap cap_dac_read_search+ep ./nfsmount.test && ./nfsmount.test -test.v")
}


@@ -5,11 +5,11 @@ package cmd
import (
"bytes"
"fmt"
"log/slog"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/operations"
@@ -19,8 +19,6 @@ import (
const (
// interval between progress prints
defaultProgressInterval = 500 * time.Millisecond
// time format for logging
logTimeFormat = "2006/01/02 15:04:05"
)
// startProgress starts the progress bar printing
@@ -28,15 +26,13 @@ const (
// It returns a func which should be called to stop the stats.
func startProgress() func() {
stopStats := make(chan struct{})
oldLogOutput := fs.LogOutput
oldSyncPrint := operations.SyncPrintf
if !log.Redirected() {
// Intercept the log calls if not logging to file or syslog
fs.LogOutput = func(level fs.LogLevel, text string) {
printProgress(fmt.Sprintf("%s %-6s: %s", time.Now().Format(logTimeFormat), level, text))
}
log.Handler.SetOutput(func(level slog.Level, text string) {
printProgress(text)
})
}
// Intercept output from functions such as HashLister to stdout
@@ -60,7 +56,10 @@ func startProgress() func() {
case <-stopStats:
ticker.Stop()
printProgress("")
fs.LogOutput = oldLogOutput
if !log.Redirected() {
// Reset intercept of the log calls
log.Handler.ResetOutput()
}
operations.SyncPrintf = oldSyncPrint
fmt.Println("")
return


@@ -3,6 +3,7 @@ package dlna
import (
"bytes"
"context"
"encoding/xml"
"fmt"
"net"
@@ -19,9 +20,12 @@ import (
"github.com/anacrolix/dms/upnp"
"github.com/anacrolix/log"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/dlna/data"
"github.com/rclone/rclone/cmd/serve/dlna/dlnaflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/systemd"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
@@ -29,9 +33,63 @@ import (
"github.com/spf13/cobra"
)
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "addr",
Default: ":7879",
Help: "The ip:port or :port to bind the DLNA http server to",
}, {
Name: "name",
Default: "",
Help: "Name of DLNA server",
}, {
Name: "log_trace",
Default: false,
Help: "Enable trace logging of SOAP traffic",
}, {
Name: "interface",
Default: []string{},
Help: "The interface to use for SSDP (repeat as necessary)",
}, {
Name: "announce_interval",
Default: fs.Duration(12 * time.Minute),
Help: "The interval between SSDP announcements",
}}
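// Illustrative note (assumption, not from the original source): registered
// through AddFlagsFromOptions below, these options surface as the --addr,
// --name, --log-trace, --interface and --announce-interval flags of
// "rclone serve dlna".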
// Options is the type for DLNA serving options.
type Options struct {
ListenAddr string `config:"addr"`
FriendlyName string `config:"name"`
LogTrace bool `config:"log_trace"`
InterfaceNames []string `config:"interface"`
AnnounceInterval fs.Duration `config:"announce_interval"`
}
// Opt contains the options for DLNA serving.
var Opt Options
func init() {
dlnaflags.AddFlags(Command.Flags())
vfsflags.AddFlags(Command.Flags())
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "dlna", Opt: &Opt, Options: OptionsInfo})
flagSet := Command.Flags()
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
vfsflags.AddFlags(flagSet)
serve.Command.AddCommand(Command)
serve.AddRc("dlna", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newServer(ctx, f, &opt, &vfsOpt)
})
}
// Command definition for cobra.
@@ -53,7 +111,19 @@ Rclone will add external subtitle files (.srt) to videos if they have the same
filename as the video file itself (except the extension), either in the same
directory as the video, or in a "Subs" subdirectory.
` + dlnaflags.Help + vfs.Help(),
### Server options
Use ` + "`--addr`" + ` to specify which IP address and port the server should
listen on, e.g. ` + "`--addr 1.2.3.4:8000` or `--addr :8080`" + ` to listen to all
IPs.
Use ` + "`--name`" + ` to choose the friendly server name, which is by
default "rclone (hostname)".
Use ` + "`--log-trace` in conjunction with `-vv`" + ` to enable additional debug
logging of all UPNP traffic.
` + vfs.Help(),
Annotations: map[string]string{
"versionIntroduced": "v1.46",
"groups": "Filter",
@@ -63,16 +133,12 @@ directory as the video, or in a "Subs" subdirectory.
f := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
s, err := newServer(f, &dlnaflags.Opt)
s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt)
if err != nil {
return err
}
if err := s.Serve(); err != nil {
return err
}
defer systemd.Notify()()
s.Wait()
return nil
return s.Serve()
})
},
}
@@ -108,7 +174,7 @@ type server struct {
vfs *vfs.VFS
}
func newServer(f fs.Fs, opt *dlnaflags.Options) (*server, error) {
func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options) (*server, error) {
friendlyName := opt.FriendlyName
if friendlyName == "" {
friendlyName = makeDefaultFriendlyName()
@@ -137,7 +203,7 @@ func newServer(f fs.Fs, opt *dlnaflags.Options) (*server, error) {
waitChan: make(chan struct{}),
httpListenAddr: opt.ListenAddr,
f: f,
vfs: vfs.New(f, &vfscommon.Opt),
vfs: vfs.New(f, vfsOpt),
}
s.services = map[string]UPnPService{
@@ -168,6 +234,19 @@ func newServer(f fs.Fs, opt *dlnaflags.Options) (*server, error) {
http.FileServer(data.Assets))))
s.handler = logging(withHeader("Server", serverField, r))
// Currently, the SSDP server only listens on an IPv4 multicast address.
// Differentiate between two INADDR_ANY addresses,
// so that 0.0.0.0 can only listen on IPv4 addresses.
network := "tcp4"
if strings.Count(s.httpListenAddr, ":") > 1 {
network = "tcp"
}
listener, err := net.Listen(network, s.httpListenAddr)
if err != nil {
return nil, err
}
s.HTTPConn = listener
return s, nil
}
@@ -288,24 +367,9 @@ func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) {
http.ServeContent(w, r, remotePath, node.ModTime(), in)
}
// Serve runs the server - returns the error only if
// the listener was not started; does not block, so
// use s.Wait() to block on the listener indefinitely.
// Serve runs the server - returns the error only if the listener was
// not started. Blocks until the server is closed.
func (s *server) Serve() (err error) {
if s.HTTPConn == nil {
// Currently, the SSDP server only listens on an IPv4 multicast address.
// Differentiate between two INADDR_ANY addresses,
// so that 0.0.0.0 can only listen on IPv4 addresses.
network := "tcp4"
if strings.Count(s.httpListenAddr, ":") > 1 {
network = "tcp"
}
s.HTTPConn, err = net.Listen(network, s.httpListenAddr)
if err != nil {
return
}
}
go func() {
s.startSSDP()
}()
@@ -319,6 +383,7 @@ func (s *server) Serve() (err error) {
}
}()
s.Wait()
return nil
}
@@ -327,13 +392,19 @@ func (s *server) Wait() {
<-s.waitChan
}
func (s *server) Close() {
// Shutdown the DLNA server
func (s *server) Shutdown() error {
err := s.HTTPConn.Close()
if err != nil {
fs.Errorf(s.f, "Error closing HTTP server: %v", err)
return
}
close(s.waitChan)
if err != nil {
return fmt.Errorf("failed to shutdown DLNA server: %w", err)
}
return nil
}
// Return the first address of the server
func (s *server) Addr() net.Addr {
return s.HTTPConn.Addr()
}
// Run SSDP (multicast for server discovery) on all interfaces.


@@ -13,11 +13,13 @@ import (
"github.com/anacrolix/dms/soap"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/dlna/dlnaflags"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -33,12 +35,14 @@ const (
)
func startServer(t *testing.T, f fs.Fs) {
opt := dlnaflags.Opt
opt := Opt
opt.ListenAddr = testBindAddress
var err error
dlnaServer, err = newServer(f, &opt)
dlnaServer, err = newServer(context.Background(), f, &opt, &vfscommon.Opt)
assert.NoError(t, err)
assert.NoError(t, dlnaServer.Serve())
go func() {
assert.NoError(t, dlnaServer.Serve())
}()
baseURL = "http://" + dlnaServer.HTTPConn.Addr().String()
}
@@ -271,3 +275,10 @@ func TestContentDirectoryBrowseDirectChildren(t *testing.T) {
}
}
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "dlna",
"vfs_cache_mode": "off",
})
}


@@ -1,69 +0,0 @@
// Package dlnaflags provides utility functionality to DLNA.
package dlnaflags
import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/pflag"
)
// Help contains the text for the command line help and manual.
var Help = `### Server options
Use ` + "`--addr`" + ` to specify which IP address and port the server should
listen on, e.g. ` + "`--addr 1.2.3.4:8000` or `--addr :8080`" + ` to listen to all
IPs.
Use ` + "`--name`" + ` to choose the friendly server name, which is by
default "rclone (hostname)".
Use ` + "`--log-trace` in conjunction with `-vv`" + ` to enable additional debug
logging of all UPNP traffic.
`
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "addr",
Default: ":7879",
Help: "The ip:port or :port to bind the DLNA http server to",
}, {
Name: "name",
Default: "",
Help: "Name of DLNA server",
}, {
Name: "log_trace",
Default: false,
Help: "Enable trace logging of SOAP traffic",
}, {
Name: "interface",
Default: []string{},
Help: "The interface to use for SSDP (repeat as necessary)",
}, {
Name: "announce_interval",
Default: fs.Duration(12 * time.Minute),
Help: "The interval between SSDP announcements",
}}
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "dlna", Opt: &Opt, Options: OptionsInfo})
}
// Options is the type for DLNA serving options.
type Options struct {
ListenAddr string `config:"addr"`
FriendlyName string `config:"name"`
LogTrace bool `config:"log_trace"`
InterfaceNames []string `config:"interface"`
AnnounceInterval fs.Duration `config:"announce_interval"`
}
// Opt contains the options for DLNA serving.
var Opt Options
// AddFlags add the command line flags for DLNA serving.
func AddFlags(flagSet *pflag.FlagSet) {
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
}


@@ -12,6 +12,7 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
@@ -50,6 +51,8 @@ func init() {
// Add common mount/vfs flags
mountlib.AddFlags(cmdFlags)
vfsflags.AddFlags(cmdFlags)
// Register with parent command
serve.Command.AddCommand(Command)
}
// Command definition for cobra


@@ -18,13 +18,16 @@ import (
"time"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
@@ -70,8 +73,8 @@ type Options struct {
ListenAddr string `config:"addr"` // Port to listen on
PublicIP string `config:"public_ip"` // Passive ports range
PassivePorts string `config:"passive_port"` // Passive ports range
BasicUser string `config:"user"` // single username for basic auth if not using Htpasswd
BasicPass string `config:"pass"` // password for BasicUser
User string `config:"user"` // single username for basic auth if not using Htpasswd
Pass string `config:"pass"` // password for User
TLSCert string `config:"cert"` // TLS PEM key (concatenation of certificate and CA certificate)
TLSKey string `config:"key"` // TLS PEM Private key
}
@@ -88,6 +91,29 @@ func init() {
vfsflags.AddFlags(Command.Flags())
proxyflags.AddFlags(Command.Flags())
AddFlags(Command.Flags())
serve.Command.AddCommand(Command)
serve.AddRc("ftp", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read Proxy Opts
var proxyOpt = proxy.Opt // set default opts
err = configstruct.SetAny(in, &proxyOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt)
})
}
// Command definition for cobra
@@ -121,18 +147,18 @@ You can set a single username and password with the --user and --pass flags.
},
Run: func(command *cobra.Command, args []string) {
var f fs.Fs
if proxyflags.Opt.AuthProxy == "" {
if proxy.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
cmd.CheckArgs(0, 0, command, args)
}
cmd.Run(false, false, command, func() error {
s, err := newServer(context.Background(), f, &Opt)
s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
if err != nil {
return err
}
return s.serve()
return s.Serve()
})
},
}
@@ -157,7 +183,7 @@ func init() {
var passivePortsRe = regexp.MustCompile(`^\s*\d+\s*-\s*\d+\s*$`)
// Make a new FTP to serve the remote
func newServer(ctx context.Context, f fs.Fs, opt *Options) (*driver, error) {
func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (*driver, error) {
host, port, err := net.SplitHostPort(opt.ListenAddr)
if err != nil {
return nil, fmt.Errorf("failed to parse host:port from %q", opt.ListenAddr)
@@ -172,11 +198,11 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (*driver, error) {
ctx: ctx,
opt: *opt,
}
if proxyflags.Opt.AuthProxy != "" {
d.proxy = proxy.New(ctx, &proxyflags.Opt)
if proxy.Opt.AuthProxy != "" {
d.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
d.userPass = make(map[string]string, 16)
} else {
d.globalVFS = vfs.New(f, &vfscommon.Opt)
d.globalVFS = vfs.New(f, vfsOpt)
}
d.useTLS = d.opt.TLSKey != ""
@@ -208,20 +234,58 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (*driver, error) {
return d, nil
}
// serve runs the ftp server
func (d *driver) serve() error {
// Serve runs the FTP server until it is shut down
func (d *driver) Serve() error {
fs.Logf(d.f, "Serving FTP on %s", d.srv.Hostname+":"+strconv.Itoa(d.srv.Port))
return d.srv.ListenAndServe()
err := d.srv.ListenAndServe()
if err == ftp.ErrServerClosed {
err = nil
}
return err
}
// close stops the ftp server
// Shutdown stops the ftp server
//
//lint:ignore U1000 unused when not building linux
func (d *driver) close() error {
func (d *driver) Shutdown() error {
fs.Logf(d.f, "Stopping FTP on %s", d.srv.Hostname+":"+strconv.Itoa(d.srv.Port))
return d.srv.Shutdown()
}
// Return the first address of the server
func (d *driver) Addr() net.Addr {
// The FTP server doesn't let us read the listener
// so we have to synthesize the net.Addr here.
// On errors we return a zero or only partially filled address.
addr := &net.TCPAddr{}
// Split host and port
host, port, err := net.SplitHostPort(d.opt.ListenAddr)
if err != nil {
fs.Errorf(nil, "ftp: addr: invalid address format: %v", err)
return addr
}
// Parse port
addr.Port, err = strconv.Atoi(port)
if err != nil {
fs.Errorf(nil, "ftp: addr: invalid port number: %v", err)
}
// Resolve the host to an IP address.
ipAddrs, err := net.LookupIP(host)
if err != nil {
fs.Errorf(nil, "ftp: addr: failed to resolve host: %v", err)
} else if len(ipAddrs) == 0 {
fs.Errorf(nil, "ftp: addr: no IP addresses found for host: %s", host)
} else {
// Choose the first IP address.
addr.IP = ipAddrs[0]
}
return addr
}
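With Serve, Shutdown and Addr in place the driver satisfies the serve.Handle value returned by the rc factory registered above. The interface itself is not shown in this diff; a plausible sketch, inferred from the methods both the FTP and HTTP servers now expose, would be:
// Assumed shape of serve.Handle (not part of this diff):
type Handle interface {
	Serve() error    // run until the server is shut down
	Addr() net.Addr  // first address the server is listening on
	Shutdown() error // stop the server
}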
// Logger is the FTP library's logger, which outputs formatted messages
type Logger struct{}
@@ -269,7 +333,7 @@ func (d *driver) CheckPasswd(sctx *ftp.Context, user, pass string) (ok bool, err
d.userPass[user] = oPass
d.userPassMu.Unlock()
} else {
ok = d.opt.BasicUser == user && (d.opt.BasicPass == "" || d.opt.BasicPass == pass)
ok = d.opt.User == user && (d.opt.Pass == "" || d.opt.Pass == pass)
if !ok {
fs.Infof(nil, "login failed: bad credentials")
return false, nil

View File

@@ -12,12 +12,15 @@ import (
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/israce"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
ftp "goftp.io/server/v2"
)
const (
@@ -36,19 +39,16 @@ func TestFTP(t *testing.T) {
opt := Opt
opt.ListenAddr = testHOST + ":" + testPORT
opt.PassivePorts = testPASSIVEPORTRANGE
opt.BasicUser = testUSER
opt.BasicPass = testPASS
opt.User = testUSER
opt.Pass = testPASS
w, err := newServer(context.Background(), f, &opt)
w, err := newServer(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt)
assert.NoError(t, err)
quit := make(chan struct{})
go func() {
err := w.serve()
assert.NoError(t, w.Serve())
close(quit)
if err != ftp.ErrServerClosed {
assert.NoError(t, err)
}
}()
// Config for the backend we'll use to connect to the server
@@ -61,7 +61,7 @@ func TestFTP(t *testing.T) {
}
return config, func() {
err := w.close()
err := w.Shutdown()
assert.NoError(t, err)
<-quit
}
@@ -69,3 +69,13 @@ func TestFTP(t *testing.T) {
servetest.Run(t, "ftp", start)
}
func TestRc(t *testing.T) {
if israce.Enabled {
t.Skip("Skipping under race detector as underlying library is racy")
}
servetest.TestRc(t, rc.Params{
"type": "ftp",
"vfs_cache_mode": "off",
})
}

View File

@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"path"
@@ -15,10 +16,14 @@ import (
"github.com/go-chi/chi/v5/middleware"
"github.com/rclone/rclone/cmd"
cmdserve "github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
libhttp "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/lib/systemd"
@@ -28,6 +33,12 @@ import (
"github.com/spf13/cobra"
)
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{}.
Add(libhttp.ConfigInfo).
Add(libhttp.AuthConfigInfo).
Add(libhttp.TemplateConfigInfo)
// Options required for http server
type Options struct {
Auth libhttp.AuthConfig
@@ -45,17 +56,42 @@ var DefaultOpt = Options{
// Opt is options set by command line flags
var Opt = DefaultOpt
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "http", Opt: &Opt, Options: OptionsInfo})
}
// flagPrefix is the prefix used to uniquely identify command line flags.
// It is intentionally empty for this package.
const flagPrefix = ""
func init() {
flagSet := Command.Flags()
libhttp.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth)
libhttp.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP)
libhttp.AddTemplateFlagsPrefix(flagSet, flagPrefix, &Opt.Template)
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
vfsflags.AddFlags(flagSet)
proxyflags.AddFlags(flagSet)
cmdserve.Command.AddCommand(Command)
cmdserve.AddRc("http", func(ctx context.Context, f fs.Fs, in rc.Params) (cmdserve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read Proxy Opts
var proxyOpt = proxy.Opt // set default opts
err = configstruct.SetAny(in, &proxyOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt)
})
}
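As with FTP, splitting the old run() helper into newServer plus Serve (shown in the hunks below) lets the CLI path and the rc path share one lifecycle. A rough sketch of that lifecycle, mirroring the test further down, with ctx and f assumed in scope and error handling elided:
s, _ := newServer(ctx, f, &Opt, &vfscommon.Opt, &proxy.Opt) // build but don't block
go func() { _ = s.Serve() }()                               // serve in the background
fs.Logf(nil, "listening on %v", s.Addr())                   // the address is now queryable
defer func() { _ = s.Shutdown() }()                         // stop when done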
// Command definition for cobra
@@ -81,7 +117,7 @@ control the stats printing.
},
Run: func(command *cobra.Command, args []string) {
var f fs.Fs
if proxyflags.Opt.AuthProxy == "" {
if proxy.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
@@ -89,14 +125,12 @@ control the stats printing.
}
cmd.Run(false, true, command, func() error {
s, err := run(context.Background(), f, Opt)
s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
}
defer systemd.Notify()()
s.server.Wait()
return nil
return s.Serve()
})
},
}
@@ -136,19 +170,19 @@ func (s *HTTP) auth(user, pass string) (value any, err error) {
return VFS, err
}
func run(ctx context.Context, f fs.Fs, opt Options) (s *HTTP, err error) {
func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (s *HTTP, err error) {
s = &HTTP{
f: f,
ctx: ctx,
opt: opt,
opt: *opt,
}
if proxyflags.Opt.AuthProxy != "" {
s.proxy = proxy.New(ctx, &proxyflags.Opt)
if proxyOpt.AuthProxy != "" {
s.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
// override auth
s.opt.Auth.CustomAuthFn = s.auth
} else {
s._vfs = vfs.New(f, &vfscommon.Opt)
s._vfs = vfs.New(f, vfsOpt)
}
s.server, err = libhttp.NewServer(ctx,
@@ -168,11 +202,26 @@ func run(ctx context.Context, f fs.Fs, opt Options) (s *HTTP, err error) {
router.Get("/*", s.handler)
router.Head("/*", s.handler)
s.server.Serve()
return s, nil
}
// Serve HTTP until the server is shut down
func (s *HTTP) Serve() error {
s.server.Serve()
s.server.Wait()
return nil
}
// Addr returns the first address of the server
func (s *HTTP) Addr() net.Addr {
return s.server.Addr()
}
// Shutdown the server
func (s *HTTP) Shutdown() error {
return s.server.Shutdown()
}
// handler reads incoming requests and dispatches them
func (s *HTTP) handler(w http.ResponseWriter, r *http.Request) {
isDir := strings.HasSuffix(r.URL.Path, "/")

View File

@@ -12,10 +12,13 @@ import (
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/rc"
libhttp "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -39,13 +42,16 @@ func start(ctx context.Context, t *testing.T, f fs.Fs) (s *HTTP, testURL string)
},
}
opts.HTTP.ListenAddr = []string{testBindAddress}
if proxyflags.Opt.AuthProxy == "" {
if proxy.Opt.AuthProxy == "" {
opts.Auth.BasicUser = testUser
opts.Auth.BasicPass = testPass
}
s, err := run(ctx, f, opts)
s, err := newServer(ctx, f, &opts, &vfscommon.Opt, &proxy.Opt)
require.NoError(t, err, "failed to start server")
go func() {
require.NoError(t, s.Serve())
}()
urls := s.server.URLs()
require.Len(t, urls, 1, "expected one URL")
@@ -110,9 +116,9 @@ func testGET(t *testing.T, useProxy bool) {
cmd := "go run " + prog + " " + files
// FIXME this is untidy setting a global variable!
proxyflags.Opt.AuthProxy = cmd
proxy.Opt.AuthProxy = cmd
defer func() {
proxyflags.Opt.AuthProxy = ""
proxy.Opt.AuthProxy = ""
}()
f = nil
@@ -267,3 +273,10 @@ func TestGET(t *testing.T) {
func TestAuthProxy(t *testing.T) {
testGET(t, true)
}
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "http",
"vfs_cache_mode": "off",
})
}

View File

@@ -3,6 +3,7 @@
package nfs
import (
"bytes"
"crypto/md5"
"encoding/hex"
"errors"
@@ -30,6 +31,15 @@ var (
ErrorSymlinkCacheNoPermission = errors.New("symlink cache must be run as root or with CAP_DAC_READ_SEARCH")
)
// Metadata files have the file handle of their source file with this
// suffixed so we can look them up directly from the file handle.
//
// Note that this is 4 bytes - using a non-multiple of 4 will cause
// the Linux NFS client not to be able to read any files.
//
// The value is big endian 0x00000001
var metadataSuffix = []byte{0x00, 0x00, 0x00, 0x01}
// Cache controls the file handle cache implementation
type Cache interface {
// ToHandle takes a file and represents it with an opaque handle to reference it.
@@ -77,7 +87,9 @@ type diskHandler struct {
write func(fh []byte, cachePath string, fullPath string) ([]byte, error)
read func(fh []byte, cachePath string) ([]byte, error)
remove func(fh []byte, cachePath string) error
handleType int32 //nolint:unused // used by the symlink cache
suffix func(fh []byte) []byte // returns nil for no suffix or the suffix
handleType int32 //nolint:unused // used by the symlink cache
metadata string // extension for metadata
}
// Create a new disk handler
@@ -102,6 +114,8 @@ func newDiskHandler(h *Handler) (dh *diskHandler, err error) {
write: dh.diskCacheWrite,
read: dh.diskCacheRead,
remove: dh.diskCacheRemove,
suffix: dh.diskCacheSuffix,
metadata: h.vfs.Opt.MetadataExtension,
}
fs.Infof("nfs", "Storing handle cache in %q", dh.cacheDir)
return dh, nil
@@ -124,6 +138,17 @@ func (dh *diskHandler) handleToPath(fh []byte) (cachePath string) {
return cachePath
}
// Return true if name represents a metadata file
//
// It returns the underlying path
func (dh *diskHandler) isMetadataFile(name string) (rawName string, found bool) {
if dh.metadata == "" {
return name, false
}
rawName, found = strings.CutSuffix(name, dh.metadata)
return rawName, found
}
// ToHandle takes a file and represents it with an opaque handle to reference it.
// In stateless nfs (when it's serving a unix fs) this can be the device + inode
// but we can generalize with a stateful local cache of handed out IDs.
@@ -131,6 +156,8 @@ func (dh *diskHandler) ToHandle(f billy.Filesystem, splitPath []string) (fh []by
dh.mu.Lock()
defer dh.mu.Unlock()
fullPath := path.Join(splitPath...)
// metadata file has file handle of original file
fullPath, isMetadataFile := dh.isMetadataFile(fullPath)
fh = hashPath(fullPath)
cachePath := dh.handleToPath(fh)
cacheDir := filepath.Dir(cachePath)
@@ -144,6 +171,10 @@ func (dh *diskHandler) ToHandle(f billy.Filesystem, splitPath []string) (fh []by
fs.Errorf("nfs", "Couldn't create cache file handle: %v", err)
return fh
}
// metadata file handle is suffixed with metadataSuffix
if isMetadataFile {
fh = append(fh, metadataSuffix...)
}
return fh
}
@@ -152,18 +183,43 @@ func (dh *diskHandler) diskCacheWrite(fh []byte, cachePath string, fullPath stri
return fh, os.WriteFile(cachePath, []byte(fullPath), 0600)
}
var errStaleHandle = &nfs.NFSStatusError{NFSStatus: nfs.NFSStatusStale}
var (
errStaleHandle = &nfs.NFSStatusError{NFSStatus: nfs.NFSStatusStale}
)
// Test to see if a fh is a metadata handle and if so return the underlying handle
func (dh *diskHandler) isMetadataHandle(fh []byte) (isMetadata bool, newFh []byte, err error) {
if dh.metadata == "" {
return false, fh, nil
}
suffix := dh.suffix(fh)
if len(suffix) == 0 {
// OK
return false, fh, nil
} else if bytes.Equal(suffix, metadataSuffix) {
return true, fh[:len(fh)-len(suffix)], nil
}
fs.Errorf("nfs", "Bad file handle suffix %X", suffix)
return false, nil, errStaleHandle
}
// FromHandle converts from an opaque handle to the file it represents
func (dh *diskHandler) FromHandle(fh []byte) (f billy.Filesystem, splitPath []string, err error) {
dh.mu.RLock()
defer dh.mu.RUnlock()
isMetadata, fh, err := dh.isMetadataHandle(fh)
if err != nil {
return nil, nil, err
}
cachePath := dh.handleToPath(fh)
fullPathBytes, err := dh.read(fh, cachePath)
if err != nil {
fs.Errorf("nfs", "Stale handle %q: %v", cachePath, err)
return nil, nil, errStaleHandle
}
if isMetadata {
fullPathBytes = append(fullPathBytes, []byte(dh.metadata)...)
}
splitPath = strings.Split(string(fullPathBytes), "/")
return dh.billyFS, splitPath, nil
}
@@ -177,8 +233,16 @@ func (dh *diskHandler) diskCacheRead(fh []byte, cachePath string) ([]byte, error
func (dh *diskHandler) InvalidateHandle(f billy.Filesystem, fh []byte) error {
dh.mu.Lock()
defer dh.mu.Unlock()
isMetadata, fh, err := dh.isMetadataHandle(fh)
if err != nil {
return err
}
if isMetadata {
// Can't invalidate a metadata handle as it is synthetic
return nil
}
cachePath := dh.handleToPath(fh)
err := dh.remove(fh, cachePath)
err = dh.remove(fh, cachePath)
if err != nil {
fs.Errorf("nfs", "Failed to remove handle %q: %v", cachePath, err)
}
@@ -190,6 +254,14 @@ func (dh *diskHandler) diskCacheRemove(fh []byte, cachePath string) error {
return os.Remove(cachePath)
}
// Return a suffix for the file handle or nil
func (dh *diskHandler) diskCacheSuffix(fh []byte) []byte {
if len(fh) <= md5.Size {
return nil
}
return fh[md5.Size:]
}
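Putting the pieces together: a plain handle is the fixed-length hash of the path (md5.Size bytes, which is what diskCacheSuffix relies on), and a metadata handle is the same hash with the 4-byte metadataSuffix appended, keeping the total length a multiple of 4 as the comment above requires. A small illustrative sketch (not part of the diff):
fh := hashPath("dir/file.txt")          // 16 bytes (md5.Size)
metaFh := append(fh, metadataSuffix...) // 20 bytes, still a multiple of 4
isMeta := len(metaFh) > md5.Size && bytes.Equal(metaFh[md5.Size:], metadataSuffix) // true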
// HandleLimit exports how many file handles can be safely stored by this cache.
func (dh *diskHandler) HandleLimit() int {
return math.MaxInt

View File

@@ -5,10 +5,13 @@ package nfs
import (
"context"
"fmt"
"strings"
"sync"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/vfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -18,6 +21,8 @@ const testSymlinkCache = "go test -c && sudo setcap cap_dac_read_search+ep ./nfs
// Check basic CRUD operations
func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
isMetadata := strings.HasSuffix(fileName, ".metadata")
// Check reading a non existent handle returns an error
_, _, err := c.FromHandle([]byte{10})
assert.Error(t, err)
@@ -26,6 +31,11 @@ func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
splitPath := []string{"dir", fileName}
fh := c.ToHandle(h.billyFS, splitPath)
assert.True(t, len(fh) > 0)
if isMetadata {
assert.Equal(t, metadataSuffix, fh[len(fh)-len(metadataSuffix):])
} else {
assert.NotEqual(t, metadataSuffix, fh[len(fh)-len(metadataSuffix):])
}
// Read the handle back
newFs, newSplitPath, err := c.FromHandle(fh)
@@ -43,8 +53,13 @@ func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
// Check the handle is gone and returning stale handle error
_, _, err = c.FromHandle(fh)
require.Error(t, err)
assert.Equal(t, errStaleHandle, err)
if !isMetadata {
require.Error(t, err)
assert.Equal(t, errStaleHandle, err)
} else {
// Can't invalidate metadata handles
require.NoError(t, err)
}
}
// Thrash the cache operations in parallel on different files
@@ -113,8 +128,10 @@ func TestCache(t *testing.T) {
cacheType := cacheType
t.Run(cacheType.String(), func(t *testing.T) {
h := &Handler{
vfs: vfs.New(object.MemoryFs, nil),
billyFS: billyFS,
}
h.vfs.Opt.MetadataExtension = ".metadata"
h.opt.HandleLimit = 1000
h.opt.HandleCache = cacheType
h.opt.HandleCacheDir = t.TempDir()
@@ -151,6 +168,10 @@ func TestCache(t *testing.T) {
t.Run("ThrashSame", func(t *testing.T) {
testCacheThrashSame(t, h, c)
})
// Metadata file handles are only supported on non-memory caches
t.Run("CRUDMetadata", func(t *testing.T) {
testCacheCRUD(t, h, c, "file.metadata")
})
}
})
}

Some files were not shown because too many files have changed in this diff.