mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits


784 Commits

Author SHA1 Message Date
Nick Craig-Wood
444a6e6d2d Make http servers obey --dump headers,bodies 2023-07-30 13:26:20 +01:00
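
For illustration only, a rough sketch of the effect (the served path is made up): the global --dump flag is now honoured by the built-in HTTP servers too.

    rclone serve webdav /srv/data --dump headers,bodies
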
nielash
982f76b4df sftp: support dynamic --sftp-path-override
Before this change, rclone always expected --sftp-path-override to be
the absolute SSH path to remote:path/subpath which effectively made it
unusable for wrapped remotes (for example, when used with a crypt
remote, the user would need to provide the full decrypted path.)

After this change, the old behavior remains the default, but dynamic
paths are now also supported, if the user adds '@' as the first
character of --sftp-path-override. Rclone will ignore the '@' and
treat the rest of the string as the path to the SFTP remote's root.
Rclone will then add any relative subpaths automatically (including
unwrapping/decrypting remotes as necessary).

In other words, the path_override config parameter can now be used to
specify the difference between the SSH and SFTP paths. Once specified
in the config, it is no longer necessary to re-specify for each
command.
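
As an illustrative sketch only (the remote names and path below are hypothetical): a crypt remote wrapped around an sftp remote can now use the dynamic form, either on the command line or as path_override in the config.

    # '@' + path to the SFTP remote's root; rclone appends the
    # (decrypted) subpath itself
    rclone sync /local/dir mycrypt:backups --sftp-path-override "@/home/user"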

See: https://forum.rclone.org/t/sftp-path-override-breaks-on-wrapped-remotes/40025
2023-07-30 03:12:07 +01:00
Zach
347812d1d3 ftp,sftp: add socks_proxy support for SOCKS5 proxies
Fixes #3558
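
A possible usage sketch, assuming the new option surfaces as the usual backend-prefixed flag (the flag spelling and proxy address below are assumptions, not taken from this commit):

    rclone lsd mysftp: --sftp-socks-proxy localhost:1080
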
2023-07-30 03:02:08 +01:00
yuudi
f4449440f8 http: CORS should not be sent if not set (#6433) 2023-07-29 16:12:31 +09:00
kapitainsky
e66675d346 docs: rclone backend restore 2023-07-29 11:31:16 +09:00
Nick Craig-Wood
45228e2f18 build: update dependencies
This does not update bazil/fuse because it does not build on freebsd

https://github.com/bazil/fuse/issues/295

This partially updates the prometheus library as the latest no longer compiles with plan9

https://github.com/prometheus/procfs/issues/554
2023-07-29 01:57:23 +01:00
Nick Craig-Wood
b866850fdd Add yuudi to contributors 2023-07-29 01:57:23 +01:00
yuudi
5b63b9534f rc: add execute-id for job-id 2023-07-28 18:35:14 +09:00
Nick Craig-Wood
10449c86a4 sftp: add --sftp-ssh to specify an external ssh binary to use
This allows using an external ssh binary instead of the built in ssh
library for making SFTP connections.
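
A rough usage sketch (the remote name and ssh options are hypothetical); rclone invokes the given command instead of its internal ssh library:

    rclone lsd mysftp: --sftp-ssh "ssh -o ServerAliveInterval=20"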

This adds another integration test target, TestSFTPRcloneSSH.

Fixes #7012
2023-07-28 10:29:02 +01:00
Nick Craig-Wood
26a9a9fed2 Add Niklas Hambüchen to contributors 2023-07-28 10:29:02 +01:00
Chun-Hung Tseng
602e42d334 protondrive: fix a bug in parsing User metadata (#7174) 2023-07-28 11:03:23 +02:00
Niklas Hambüchen
4c5a21703e docs: dropbox: Explain that Teams needs "Full Dropbox" 2023-07-28 17:52:29 +09:00
Nick Craig-Wood
f2ee949eff fichier: implement DirMove
See: https://forum.rclone.org/t/1fichier-rclone-does-not-allow-to-rename-files-and-folders-when-you-mount-a-1fichier-disk-drive/24726/
2023-07-28 01:25:42 +01:00
kapitainsky
3ad255172c docs: b2 versions names caveat 2023-07-28 09:23:34 +09:00
Nick Craig-Wood
29b1751d0e serve webdav: fix error: Expecting fs.Object or fs.Directory, got <nil>
Before this change rclone serve webdav would sometimes give this error

    Expecting fs.Object or fs.Directory, got <nil>

It turns out that when a file is being updated it doesn't have a
DirEntry and it is allowed to be <nil> so in this case we create the
mime type from the extension.

See: https://forum.rclone.org/t/webdav-union-of-onedrive-expecting-fs-object-or-fs-directory-got-nil/40298
2023-07-28 00:54:45 +01:00
kapitainsky
363da9aa82 docs: s3 versions names caveat 2023-07-27 12:36:50 +09:00
yuudi
6c8148ef39 http servers: allow CORS to be set with --allow-origin flag - fixes #5078
Some changes about test cases:
Because MiddlewareCORS will return early on OPTIONS request,
this middleware should only be used once at NewServer function.
Test cases should pass AllowOrigin config instead of adding
this middleware again.

A new test case was added to test CORS preflight request with
an authenticator. Preflight request should always return 200 OK
regardless of authentication.
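
For illustration, a minimal sketch of the new flag in use (path, port and origin are made up):

    rclone serve webdav /srv/files --addr :8080 --allow-origin "https://app.example.com"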

Co-authored-by: yuudi <yuudi@users.noreply.github.com>
2023-07-26 10:15:54 +01:00
Nick Craig-Wood
3ed4a2e963 sftp: stop uploads re-using the same ssh connection to improve performance
Before this change we released the ssh connection back to the pool
before the upload was finished.

This meant that uploads were re-using the same ssh connection, which
reduced throughput.

This releases the ssh connection back to the pool only after the
upload has finished, or on error state.

See: https://forum.rclone.org/t/sftp-backend-opens-less-connection-than-expected/40245
2023-07-25 13:05:37 +01:00
Anagh Kumar Baranwal
aaadb48d48 vfs: keep virtual directory status accurate and reduce deadlock potential
This changes hasVirtual to an atomic struct variable that's updated on
add or delete from the virtual map.

This keeps it up to date and avoids deadlocks.

Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-07-25 09:08:16 +01:00
Anagh Kumar Baranwal
52e25c43b9 vfs: Added cache cleaner for directories to reduce memory usage
This empties the directory cache after twice the directory cache
period to release memory.

Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-07-25 09:08:16 +01:00
Nick Craig-Wood
9a66563fc6 Add Edwin Mackenzie-Owen to contributors 2023-07-25 09:08:16 +01:00
Nick Craig-Wood
6ca670d66a Add Tiago Boeing to contributors 2023-07-25 09:08:16 +01:00
Nick Craig-Wood
809653055d Add gabriel-suela to contributors 2023-07-25 09:08:16 +01:00
Nick Craig-Wood
61325ce507 Add Ricardo D'O. Albanus to contributors 2023-07-25 09:08:16 +01:00
Edwin Mackenzie-Owen
c3989d1906 smb: implement multi-threaded writes for copies to smb
smb2.File implements the WriterAtCloser interface defined in
fs/types.go. Expose it via a OpenWriterAt method on
the fs struct to support multi-threaded writes.
2023-07-25 08:31:36 +01:00
Tiago Boeing
a79887171c docs: mega: update with solution when receiving killed on process 2023-07-25 04:21:37 +01:00
Chun-Hung Tseng
f29e284c90 protondrive: fix download signature verification bug (#7169) 2023-07-24 14:54:39 +02:00
Chun-Hung Tseng
9a66086fa0 protondrive: fix bug in digests parsing (#7164) 2023-07-24 09:00:18 +02:00
Chun-Hung Tseng
1845c261c6 protondrive: fix missing file sha1 and appstring issues (#7163) 2023-07-24 08:56:21 +02:00
Chun-Hung Tseng
70cbcef624 Add Chun-Hung Tseng to Maintainer (#7162) 2023-07-23 16:29:24 +02:00
gabriel-suela
9169b2b5ab cmd: fix log message typo 2023-07-23 08:43:03 +09:00
Ricardo D'O. Albanus
0957c8fb74 chunker: Update documentation to mention issue with small files
See: https://forum.rclone.org/t/chunker-not-deactivating-for-small-files-and-wasting-api-calls/40122
2023-07-23 00:40:50 +01:00
Anagh Kumar Baranwal
bb0cd76a5f fix: mount parsing for linux 2023-07-22 17:29:20 +05:30
Nick Craig-Wood
08240c8cf5 Add Chun-Hung Tseng to contributors 2023-07-22 10:54:21 +01:00
Chun-Hung Tseng
014acc902d protondrive: add protondrive backend - fixes #6072 2023-07-22 10:46:21 +01:00
Benjamin
33fec9c835 doc: Fix Leviia block 2023-07-18 19:58:19 +01:00
kapitainsky
3a5ffc7839 docs: mention Box as base32768 compatible
As many people suddenly move to Box (another "unlimited" cloud storage migration saga), there are frequent questions about which encoding to use for crypt file names.

Box is base32768 friendly.

It has been tested with:

https://pub.rclone.org/base32768.zip

and:

rclone test info --check-length boxremote:

maxFileLength = 255 // for 1 byte unicode characters
maxFileLength = 255 // for 2 byte unicode characters
maxFileLength = 255 // for 3 byte unicode characters
maxFileLength = -1 // for 4 byte unicode characters
2023-07-18 19:55:54 +01:00
Benjamin
8a6bf35481 Add Leviia Object Storage on index.md 2023-07-18 09:52:05 +01:00
Benjamin
f7d27f4bf2 Add Object storage to Leviia on README.md 2023-07-18 09:52:05 +01:00
kapitainsky
378a2d21ee --max-transfer - add new exit code (10)
It adds a dedicated exit code (10) for the --max-duration flag.

Rclone will exit with exit code 10 if the duration limit is reached.

It behaves in a similar fashion to --max-transfer and exit code 8.
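
A small sketch of how a script might act on the new exit code (the remotes are placeholders):

    rclone sync source: dest: --max-duration 1h
    if [ $? -eq 10 ]; then
        echo "stopped: duration limit reached"
    fi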

discussed on the forum:

https://forum.rclone.org/t/max-duration-option-is-triggering-exit-with-error/39917/6
2023-07-18 09:51:31 +01:00
Nick Craig-Wood
3404eb0444 Changelog updates from Version v1.63.1 2023-07-17 15:15:16 +01:00
Nick Craig-Wood
13e5701f2a build: add new sponsors page to docs 2023-07-17 14:28:40 +01:00
Nick Craig-Wood
432d5d1e20 operations: fix overlapping check on case insensitive file systems
Before this change, the overlapping check could erroneously give this
error on case insensitive file systems:

    Failed to sync: destination and parameter to --backup-dir mustn't overlap

The code was fixed and re-worked to be simpler and more reliable.

See: https://forum.rclone.org/t/backup-dir-cannot-be-in-root-even-when-excluded/39844/
2023-07-17 14:00:04 +01:00
Nick Craig-Wood
cc05159518 Add Benjamin to contributors 2023-07-17 14:00:04 +01:00
Benjamin
119ccb2b95 s3: add Leviia S3 Object Storage as provider 2023-07-16 18:08:47 +01:00
Anagh Kumar Baranwal
0ef0e908ca build: update to go1.21rc3 and make go1.19 the minimum required version
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-07-16 10:09:25 +01:00
Nick Craig-Wood
0063d14dbb Add darix to contributors 2023-07-14 10:27:20 +01:00
albertony
0d34efb10f box: fix reconnect failing with HTTP 400 Bad Request
The error is:

  Error: failed to configure token with jwt authentication: jwtutil: failed making auth request: 400 Bad Request

With the following additional debug information:

  jwtutil: Response Body: {"error":"invalid_grant","error_description":"Please check the 'aud' claim. Should be a string"}

The problem is that in jwt-go the RegisteredClaims type has an Audience field (the aud claim) that
is a list, while box apparently expects it to be a single string. jwt-go v4, which we
currently use, has an alternative type StandardClaims which matches what box wants.
Unfortunately StandardClaims is marked as deprecated and is removed in the
newer v5 version, so this is a short term fix only.

Fixes #7114
2023-07-14 10:24:33 +01:00
darix
415f4b2b93 webdav: nextcloud chunking: add more guidance for the user to check the config 2023-07-10 14:37:09 +01:00
Nick Craig-Wood
07cf5f1d25 operations: fix .rclonelink files not being converted back to symlinks
Before this change the new partial downloads code was causing symlinks
to be copied as regular files.

This was because the partial isn't named .rclonelink so the local
backend saves it as a normal file and renaming it to .rclonelink
doesn't cause it to become a symlink.

This fixes the problem by not copying .rclonelink files using the
partials mechanism but reverting to the previous --inplace behaviour.

This could potentially be fixed better in the future by changing the
local backend Move to change files to and from symlinks depending on
their name. However this was deemed too complicated for a point
release.

This also adds a test in the local backend. This test should ideally
be in operations but it isn't easy to put it there as operations knows
nothing of symlinks.

Fixes #7101
See: https://forum.rclone.org/t/reggression-in-v1-63-0-links-drops-the-rclonelink-extension/39483
2023-07-10 14:30:59 +01:00
Nick Craig-Wood
7d31956169 local: fix partial directory read for corrupted filesystem
Before this change if a directory entry could be listed but not
lstat-ed then rclone would give an error and abort the directory
listing with the error

    failed to read directory entry: failed to read directory "XXX": lstat XXX

This change makes sure that the directory listing carries on even
after this kind of error.

The sync will be failed but it will carry on.

This problem was caused by a programming error setting the err
variable in an outer scope when it should have been using a local err
variable.

See: https://forum.rclone.org/t/sync-aborts-if-even-one-single-unreadable-folder-is-encountered/39653
2023-07-09 17:58:03 +01:00
Nick Craig-Wood
473d443874 smb: fix "Statfs failed: bucket or container name is needed" when mounting
Before this change, if you mounted the root of the smb remote then it would
give an error on `rclone about` and periodically in the mount logs:

    Statfs failed: bucket or container name is needed in remote

This fix makes the smb backend return empty usage in this case which
will stop the errors and show the default 1P of free space.

See: https://forum.rclone.org/t/error-statfs-failed-bucket-or-container-name-is-needed-in-remote/39631
2023-07-08 12:24:46 +01:00
Nick Craig-Wood
e294b76121 Add Vladislav Vorobev to contributors 2023-07-08 12:24:46 +01:00
Vladislav Vorobev
8f3c583870 docs: no need to disable 2FA for Mail.ru Cloud anymore
This sentence was written at the time when the backend used an access token; nowadays users need to generate and use an application password instead, see #6398.
2023-07-08 10:27:40 +02:00
Nick Craig-Wood
d0d41fe847 rclone config redacted: implement support mechanism for showing redacted config
This introduces a new fs.Option flag, Sensitive and uses this along
with IsPassword to redact the info in the config file for support
purposes.

It adds this flag into backends where appropriate. It was necessary to
add oauthutil.SharedOptions to some backends as they were missing
them.

Fixes #5209
2023-07-07 16:25:14 +01:00
Nick Craig-Wood
297f15a3e3 docs: update the number of providers supported 2023-07-07 16:25:14 +01:00
Nick Craig-Wood
d5f0affd4b Add Mahad to contributors 2023-07-07 16:25:14 +01:00
Nick Craig-Wood
0598aafbfd Add BakaWang to contributors 2023-07-07 16:25:14 +01:00
Mahad
528e22f139 docs: drive: Fix step 4 in "Making your own client_id" 2023-07-06 21:24:17 +01:00
BakaWang
f1a8420814 s3: add synology to s3 provider list 2023-07-06 10:54:07 +01:00
Nick Craig-Wood
e250f1afcd docs: remove old donate page 2023-07-06 10:13:42 +01:00
Nick Craig-Wood
ebf24c9872 docs: update contact page on website 2023-07-05 16:57:07 +01:00
Paul
b4c7b240d8 webdav: nextcloud: fix must use /dav/files/USER endpoint not /webdav error
Fix https://github.com/rclone/rclone/issues/7103

Before this change the RegExp validating the endpoint URL was a bit
too strict allowing only /dav/files/USER due to chunking limitations.

This patch adds back support for /dav/files/USER/dir/subdir etc.

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2023-07-05 16:56:01 +01:00
Nick Craig-Wood
22a14a8c98 operations: fix deadlock when using lsd/ls with --progress - Fixes #7102
The --progress flag overrides operations.SyncPrintf in order to do its
magic on stdout without interfering with other output.

Before this change the syncFprintf routine in operations (which is
used to print all output to stdout) was taking the
operations.StdoutMutex and the printProgress function in the
--progress routine was also attempting to take the same mutex causing
a deadlock.

This patch fixes the problem by moving the locking from the
syncFprintf function to SyncPrintf. It is then up to the function
overriding this to lock the StdoutMutex. This ensures the StdoutMutex
can never cause a deadlock.
2023-07-03 15:07:00 +01:00
Nick Craig-Wood
07133b892d dirtree: fix performance with large directories of directories and --fast-list
Before this change if using --fast-list on a directory with more than
a few thousand directories in it DirTree.CheckParents became very slow
taking up to 24 hours for a directory with 1,000,000 directories in
it.

This is because it becomes an O(N²) operation as DirTree.Find has to
search each directory in a linear fashion as it is stored as a slice.

This patch fixes the problem by scanning the DirTree for directories
before starting the CheckParents process so it never has to call
DirTree.Find.

After the fix calling DirTree.CheckParents on a directory with
1,000,000 directories in it will take about 1 second.

Anything which calls DirTree.Find can potentially have bad performance
so in the future we should redesign the DirTree to use a different
underlying datastructure or have an index.

https://forum.rclone.org/t/almost-24-hours-cpu-compute-time-during-sync-between-two-large-s3-buckets/39375/
2023-07-03 14:09:21 +01:00
Nick Craig-Wood
a8ca18165e Add Fjodor42 to contributors 2023-07-03 14:09:21 +01:00
Nick Craig-Wood
8c4e71fc84 Add Dean Attali to contributors 2023-07-03 14:09:21 +01:00
Nick Craig-Wood
351e2db2ef Add Sawada Tsunayoshi to contributors 2023-07-03 14:09:21 +01:00
Fjodor42
2234feb23d jottacloud: add Onlime provider 2023-07-02 11:16:07 +01:00
Anagh Kumar Baranwal
fb5125ecee build: fix macos builds for versions < 12
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-07-01 18:03:50 +01:00
Dean Attali
e8cbc54a06 docs: dropbox get client id, clarify you need to click a button 2023-07-01 17:50:40 +01:00
Nick Craig-Wood
00512e1303 Start v1.64.0-DEV development 2023-06-30 15:39:03 +01:00
Nick Craig-Wood
fcfbd3153b docs: website: replace google analytics with plausible analytics 2023-06-30 14:32:53 +01:00
Nick Craig-Wood
9a8075b682 docs: rename donate page to sponsor page and rework 2023-06-30 14:32:53 +01:00
Sawada Tsunayoshi
996037bee9 docs: fixed typo in exclude example in filtering docs (#7097)
The exclude flag instructions had "without" written as "with" which changes the whole meaning of how the exclude flag works.
2023-06-30 15:28:38 +02:00
Nick Craig-Wood
e90537b2e9 Version v1.63.0 2023-06-30 14:11:17 +01:00
Nick Craig-Wood
42c211c6b2 Revert sponsors back to organization 2023-06-30 10:10:05 +01:00
Nick Craig-Wood
3d4f127b33 Revert "union: disable PartialUploads on integration tests failures"
This reverts commit 9065e921c1.

It turns out the problem for the failing fs/sync tests was the
policies being different for search and create, which meant that the
file was being created in one union branch but a different one was
found in another branch.
2023-06-29 21:11:04 +01:00
Misty
ff966b37af dropbox: fix result chans not taken care of by defer func 2023-06-28 19:49:38 +01:00
Nick Craig-Wood
3b6effa81a uptobox: fix rmdir declaring that directories weren't empty
The API seems to have changed and the `totalFileCount` item no longer
tracks the number of files in the directory so is useless for seeing
if the directory is empty.

This patch fixes the problem by seeing whether there are any files or
directories in the folder instead.

This problem was detected by the integration tests.
2023-06-28 17:27:43 +01:00
Nick Craig-Wood
8308d5d640 putio: fix server side copy failures (400 errors)
For some unknown reason the API sometimes returns a "name already
exists" error on a server side copy.

    {
      "error_id": null,
      "error_message": "Name already exist",
      "error_type": "NAME_ALREADY_EXIST",
      "error_uri": "http://api.put.io/v2/docs",
      "extra": {},
      "status": "ERROR",
      "status_code": 400
    }

This patch uploads to a temporary name then renames it which works
around the problem.

This was spotted by the integration tests.
2023-06-28 16:45:35 +01:00
Nick Craig-Wood
14024936a8 putio: fix modification times not being preserved for server side copy and move
The integration tests spotted that modification times are no longer
being preserved by the putio API in server side move and copy.

This patch explicitly sets the modtime after the server side move or
copy.
2023-06-28 11:03:19 +01:00
Nick Craig-Wood
9065e921c1 union: disable PartialUploads on integration tests failures
In this commit we enabled PartialUploads for the union backend.

3faa84b47c combine,compress,crypt,hasher,union: support wrapping backends with PartialUploads

This turns out to cause test failures in fs/sync so this commit
disables them again pending further investigation.
2023-06-27 17:31:01 +01:00
Nick Craig-Wood
99788b605e sharefile: disable streamed transfers as they no longer work
At some point the sharefile API changed to require the size of the
file in the initial transaction which makes the streaming upload fail
with this error:

    upload failed: file size does not match (-2)

This was discovered by the integration tests.
2023-06-27 17:08:37 +01:00
Nick Craig-Wood
d4cc3760e6 putio: fix uploading to the wrong object on Update with overridden remote name
In this commit we discovered a problem with objects being uploaded to
the incorrect object name. It added an integration test for the
problem.

65b2e378e0 drive: fix incorrect remote after Update on object

This test was tripped by the putio backend and this patch fixes the
problem.
2023-06-27 16:02:33 +01:00
Nick Craig-Wood
a6acbd1844 uptobox: fix Update returning the wrong object
Before this patch the Update method had a 50/50 chance of returning
the old object rather than the new updated object.

This was discovered in the integration tests.

This patch fixes the problem by deleting the duplicate object before
we look for the new object.
2023-06-27 16:02:33 +01:00
Nick Craig-Wood
389565f5e2 storj: fix uploading to the wrong object on Update with overridden remote name
In this commit we discovered a problem with objects being uploaded to
the incorrect object name. It added an integration test for the
problem.

65b2e378e0 drive: fix incorrect remote after Update on object

This test was tripped by the Storj backend and this patch fixes the
problem.
2023-06-27 16:02:33 +01:00
Nick Craig-Wood
4b4198522d storj: fix "uplink: too many requests" errors when uploading to the same file
Storj has a rate limit of 1 per second when uploading to the same
file.

This was being tripped by the integration tests.

This patch fixes it by detecting the error and sleeping for 1 second
before retrying.

See: https://github.com/storj/uplink/issues/149
2023-06-27 16:02:33 +01:00
Nick Craig-Wood
f7665300c0 fstests: allow ObjectUpdate test to retry upload 2023-06-27 16:02:33 +01:00
Nick Craig-Wood
73beae147f webdav: Fix modtime on server side copy for owncloud and nextcloud
Before this change a server side copy did not preserve the modtime.

This used to work on nextcloud but at some point it started ignoring
the `X-Oc-Mtime` header.

This patch sets the modtime explicitly after a server side copy if the
`X-Oc-Mtime` wasn't accepted.

This problem was discovered in the integration tests.
2023-06-26 20:23:28 +01:00
Nick Craig-Wood
92f8e476b7 Add mac-15 to contributors 2023-06-26 20:23:28 +01:00
Nick Craig-Wood
5849148d51 Add zzq to contributors 2023-06-26 20:23:28 +01:00
Nick Craig-Wood
37853ec412 Add Peter Fern to contributors 2023-06-26 20:23:28 +01:00
Nick Craig-Wood
ae7ff28714 Add danielkrajnik to contributors 2023-06-26 20:23:28 +01:00
Nick Craig-Wood
9873f4bc74 Add Mariusz Suchodolski to contributors 2023-06-26 20:23:28 +01:00
Nick Craig-Wood
1b200bf69a Add Paulo Schreiner to contributors 2023-06-26 20:23:28 +01:00
Nick Craig-Wood
e3fa6fe3cc swift: fix code formatting 2023-06-26 20:23:28 +01:00
mac-15
9e1b3861e7 docs: add blomp cloud storage guide 2023-06-26 17:49:27 +01:00
zzq
e9a753f678 s3: add Qiniu KODO quirks virtualHostStyle is false 2023-06-26 17:47:27 +01:00
Dimitri Papadopoulos
708391a5bf backend: fix misspellings found by codespell 2023-06-26 14:34:52 +01:00
Peter Fern
1cfed18aa7 http: add client certificate user auth middleware
This populates the authenticated user from the client certificate
common name.

Also added tests for the existing client certificate functionality.
2023-06-26 14:33:53 +01:00
kapitainsky
7751d5a00b rc: config/listremotes include from env vars
Fixes: 
#6540

Discussed:
https://forum.rclone.org/t/environment-variable-config-not-used-for-remote-control/39014
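
A rough sketch of the behaviour (the remote name and backend type are made up): a remote defined only through environment variables should now show up in the listing.

    RCLONE_CONFIG_MYENV_TYPE=memory rclone rcd --rc-no-auth &
    rclone rc config/listremotes
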
2023-06-26 12:30:44 +01:00
danielkrajnik
8274712c2c docs: s3: fix example for restoring single objects
See: https://forum.rclone.org/t/cant-restore-files-from-aws-glacier-deep-only-directories/39258/3
2023-06-26 11:41:15 +01:00
Mariusz Suchodolski
625a564ba3 docs: faq: add solution for port opening issues on Windows 2023-06-25 11:20:54 +01:00
Ehsan Tadayon
2dd2072cdb s3: Fix Arvancloud Domain and region changes and alphabetise the provider 2023-06-25 11:01:41 +01:00
kapitainsky
998d1d1727 docs: listremotes also includes remotes from env vars 2023-06-24 15:46:23 +01:00
Paulo Schreiner
fcb912a664 fs: allow setting a write buffer for multithread
When multi-thread downloading is enabled, rclone used to send a write
to disk after every read, resulting in a lot of small writes to
different locations of the file.

Depending on the underlying filesystem or device, it can be more
efficient to send bigger writes.
2023-06-23 18:44:43 +01:00
Nick Craig-Wood
5f938fb9ed s3: fix "Entry doesn't belong in directory" errors when using directory markers
Before this change we were incorrectly identifying the root directory
of the listing and adding it into the listing.

This caused higher layers of rclone to emit the error above.

See #7038
2023-06-23 18:01:11 +01:00
Nick Craig-Wood
72b79504ea azureblob: fix "Entry doesn't belong in directory" errors when using directory markers
Before this change we were incorrectly identifying the root directory
of the listing and adding it into the listing.

This caused higher layers of rclone to emit the error above.

See #7038
2023-06-23 18:01:11 +01:00
Nick Craig-Wood
3e2a606adb gcs: fix "Entry doesn't belong in directory" errors when using directory markers
Before this change we were incorrectly identifying the root directory
of the listing and adding it into the listing.

This caused higher layers of rclone to emit the error above.

Fixes #7038
2023-06-23 18:01:11 +01:00
Nick Craig-Wood
95a6e3e338 Add Stanislav Gromov to contributors 2023-06-23 18:01:11 +01:00
Anagh Kumar Baranwal
d06bb55f3f mount: Added _netdev to the example mount so it gets treated as a remote-fs rather than local-fs
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-06-23 17:37:00 +01:00
Stanislav Gromov
9f3694cea3 docs: drive: fix typo 2023-06-23 14:40:47 +01:00
Nick Craig-Wood
2c50f26c36 mount: fix mount failure on macOS with on the fly remote
This commit

3567a47258 fs: make ConfigString properly reverse suffixed file systems

made fs.ConfigString() return the full config of the backend. Because
mount was using this to make a volume name it started to make volume
names with illegal characters in which couldn't be mounted by macOS.

This fixes the problem by making a separate fs.ConfigStringFull() and
using that where appropriate and leaving the original
fs.ConfigString() function untouched.

Fixes #7063
See: https://forum.rclone.org/t/1-63-beta-fails-to-mount-on-macos-with-on-the-fly-crypt-remote/39090
2023-06-23 14:12:03 +01:00
Nick Craig-Wood
22d6c8d30d Add URenko to contributors 2023-06-23 14:12:03 +01:00
Nick Craig-Wood
96fb75c5a7 Add Sam Lai to contributors 2023-06-23 14:12:03 +01:00
URenko
acd67edf9a docs: remove "After" in systemd mount example again 2023-06-22 18:03:04 +01:00
Sam Lai
b26db8e640 accounting: bwlimit signal handler should always start
The SIGUSR2 signal handler for bandwidth limits currently only starts
if rclone is started at a time when a bandwidth limit applies. This
means that if rclone starts _outside_ such a time, i.e. with no
bandwidth limits, then enters a time where bandwidth limits do apply,
it will not be possible to use SIGUSR2 to toggle it.

This fixes that by always starting the signal handler, but only
toggling the limiter if there is a bandwidth limit configured.
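
For reference, the limiter is toggled by sending the signal to the running process, for example:

    kill -SIGUSR2 $(pidof rclone)
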
2023-06-22 17:59:24 +01:00
Nick Craig-Wood
da955e5d4f operations: remove partials when the copy fails
Before this change we were only removing partials when it was
corrupted rather than when the copy just failed.
2023-06-21 22:56:05 +01:00
Nick Craig-Wood
4f8dab8bce zoho: fix downloads with Range: header returning the wrong data
Zoho has started returning the results from Range: requests with a 200
response code rather than the technically correct 206 response code.

Before this change this triggered workaround code to deal with Zoho
not obeying Range: requests properly.

This fix tests the returned header for a Content-Range: header and if
it exists assumes it is a valid reply to the Range: request despite
the status being 200.

This problem was spotted by the integration tests.
2023-06-14 17:43:26 +01:00
Nick Craig-Wood
000ddc4951 s3: fix versions tests when running on minio 2023-06-14 17:30:36 +01:00
Nick Craig-Wood
3faa84b47c combine,compress,crypt,hasher,union: support wrapping backends with PartialUploads
This means that, for example, wrapping a sftp backend with crypt will
upload to a temporary name and then rename unless disabled with
--inplace.
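
As a small sketch (the remote name and file are hypothetical), the temporary-name behaviour can be turned off for a transfer with --inplace:

    rclone copy big.iso mycrypt:backup --inplace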

See: https://forum.rclone.org/t/backup-versioning/38978/7
2023-06-14 10:52:03 +01:00
kapitainsky
e1162ec440 docs: clarify --server-side-across-configs 2023-06-13 17:58:27 +01:00
Nick Craig-Wood
30cccc7101 cache: fix backends shutting down when in use when used via the rc
Before this fix, if a long running task (eg a copy) was started by the
rc then the backend could expire before the copy had finished.

The typical symptom was with the dropbox backend giving "batcher is
shutting down" errors.

This patch fixes the problem by pinning the backend until the job has
finished.

See: https://forum.rclone.org/t/uploads-start-repeatedly-failing-after-a-while-using-rc-sync-copy-vs-rclone-copy-for-dropbox/38873/
2023-06-13 15:48:20 +01:00
Nick Craig-Wood
1f5a29209e rc: add Job to ctx so it can be used elsewhere
See: https://forum.rclone.org/t/uploads-start-repeatedly-failing-after-a-while-using-rc-sync-copy-vs-rclone-copy-for-dropbox/38873/
2023-06-13 15:48:20 +01:00
Nick Craig-Wood
45255bccb3 accounting: fix Prometheus metrics to be the same as core/stats
In 04aa6969a4 we updated the displayed speed to be a rolling
average in core/stats and the progress output but we didn't update the
Prometheus metrics.

This patch updates the Prometheus metrics too.

Fixes #7053
2023-06-12 17:42:29 +01:00
Nick Craig-Wood
055206c4ee yandex: fix 400 Bad Request on transfer failure
Before this fix, if the upload failed for some reason the yandex
backend would attempt to retry the upload itself, which would fail
immediately with 400 Bad Request.

Normally we retry uploads at a higher level so they can be done with
new data and this patch does that.

See #7044
2023-06-11 11:11:43 +01:00
Nick Craig-Wood
f3070b82bc Add douchen to contributors 2023-06-11 11:11:43 +01:00
douchen
7e2deffc62 filter: fix deadlock with errors on --files-from
Before this change if doing a recursive directory listing with
`--files-from` if more than `--checkers` files errored (other than
file not found) then rclone would deadlock.

This fixes the problem by exiting on the first error.
2023-06-10 15:53:08 +01:00
Nick Craig-Wood
ae3ff50580 dropbox: implement --dropbox-pacer-min-sleep flag
See: https://forum.rclone.org/t/combine-mount-options-query/38080
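
A usage sketch (the value is chosen arbitrarily); the flag takes a duration:

    rclone copy /local/dir dropbox:dir --dropbox-pacer-min-sleep 50ms
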
2023-06-10 14:57:26 +01:00
Nick Craig-Wood
6486ba6344 operations: remove partially uploaded files on exit when not using --inplace
Before this change partially uploaded files (when --inplace is not in
effect) would be left lying around in the file system if rclone was
killed in the middle of a transfer.

This adds an exit handler to remove the file and removes it when the
file is complete.
2023-06-10 14:55:05 +01:00
Nick Craig-Wood
7842000f8a backend: for command not found errors, hint to look in the underlying remote
See: https://forum.rclone.org/t/rclone-cleanup-no-way-to-delete-pending-uploads-newer-than-24-hours/38416/6
2023-06-10 14:44:01 +01:00
Nick Craig-Wood
1f9c962183 operations: reopen downloads on error when using check --download and cat
Before this change, some parts of operations called the Open method on
objects directly, and some called NewReOpen to make an object which
can re-open itself on errors.

This adds a new function operations.Open which should be called
instead of fs.Object.Open to open a reliable stream of data and
changes all call sites to use that.

This means `rclone check --download` and `rclone cat` will re-open
files on failures.

See: https://forum.rclone.org/t/does-rclone-support-retries-for-check-when-using-download-flag/38641
2023-06-10 14:42:29 +01:00
Nick Craig-Wood
279d9ecc56 operations: fix pcloud can't set modified time
Before this change we tested special errors for straight equality.

This works for all normal backends, but the union backend may return
wrapped errors which contain the special error types.

In particular if a pcloud backend was part of a union when attempting
to set modification times the fs.ErrorCantSetModTime return wasn't
understood because it was wrapped in a union.Error.

This fixes the problem by using errors.Is instead in all the
comparisons in operations.

See: https://forum.rclone.org/t/failed-to-set-modification-time-1-error-pcloud-cant-set-modified-time/38596
2023-06-10 14:39:41 +01:00
Nick Craig-Wood
31773ecfbf union: allow errors to be unwrapped for inspection
Before this change the Errors type in the union backend produced
errors which could not be Unwrapped to test their type.

This adds the (go1.20) Unwrap method to the Errors type which allows
errors.Is to work on these errors.

It also adds unit tests for the Errors type and fixes a couple of
minor bugs thrown up in the process.

See: https://forum.rclone.org/t/failed-to-set-modification-time-1-error-pcloud-cant-set-modified-time/38596
2023-06-10 14:39:41 +01:00
kapitainsky
666e34cf69 s3: docs: old broken link updated 2023-06-09 18:15:54 +01:00
Nick Craig-Wood
5a84a08b3f build: fix build failure installing nfpm
Before this fix we used the bin/get-github-release.go script to
install nfpm.

However this script fails scraping the downloads page when the target
has more than a few download options. The alternative would be using
the GitHub API but this needs authentication so as not to be rate
limited on GitHub actions.

This patch switches over to go install which is less efficient but
should work in all circumstances.
2023-06-07 15:41:52 +01:00
Nick Craig-Wood
51a468b2ba genautocomplete: rename to completion with alias to old name
This brings it into line with cobra's naming scheme and stops cobra
writing another "completion" command which doesn't work as well and
confuses users.
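
For example, bash completions can now be generated under the new name (the output path below is one common choice, not the only one):

    sudo rclone completion bash /etc/bash_completion.d/rclone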

See: https://forum.rclone.org/t/rclone-genautocomplete-bash-vs-rclone-completion-bash-neither-works-fully/38431
2023-05-25 14:32:40 +01:00
Nick Craig-Wood
fc798d800c vfs: fix backends being Shutdown too early when startup takes a long time
Before this change, if the VFS took more than 5 minutes to initialise
(which can happen if there are a lot of files or a lot of files which
need uploading) the backend was dropped out of the cache before the VFS was
fully created.

This was noticeable in the dropbox backend where the batcher was shut
down too soon and prevented further uploads.

This fixes the problem by Pinning backends before the VFS cache is
created.

https://forum.rclone.org/t/if-more-than-251-elements-in-the-que-to-upload-fails-with-batcher-is-shutting-down/38076/2
2023-05-18 16:16:12 +01:00
Nick Craig-Wood
3115ede1d8 Add kapitainsky to contributors 2023-05-18 16:16:12 +01:00
kapitainsky
7a5491ba7b docs: chunker: fix typo 2023-05-17 17:10:53 +01:00
Nick Craig-Wood
a6cf4989b6 local: fix crash with --metadata on Android
Before this change we called statx which causes a

    SIGSYS: bad system call

fault.

After this we force Android to use fstatat

Fixes #7006
2023-05-17 17:03:26 +01:00
Nick Craig-Wood
f489b54fa0 operations: ignore partial tests on backends which don't support them 2023-05-17 17:03:26 +01:00
Nick Craig-Wood
6244d1729b Add Tareq Sharafy to contributors 2023-05-17 17:03:19 +01:00
Nick Craig-Wood
e97c2a2832 Add cc to contributors 2023-05-17 17:03:19 +01:00
albertony
56bf9b4a10 Add albertony to maintainers 2023-05-17 15:31:07 +02:00
WeidiDeng
ceb9406c2f serve webdav: implement owncloud checksum and modtime extensions
* implement owncloud checksum and modtime extensions for webdav server
* test rclone webdav server as owncloud webdav
2023-05-15 15:38:00 +01:00
Tareq Sharafy
1f887f7ba0 azblob: doc
Signed-off-by: Tareq Sharafy <tareq.sha@gmail.com>
2023-05-14 12:12:24 +01:00
Tareq Sharafy
7db26b6b34 azblob: support azure workload identities 2023-05-14 12:12:24 +01:00
cc
37a3309438 s3: v3sign: add missing subresource delete
The delete query string parameter must be included when you create the
CanonicalizedResource for a multi-object Delete request.
2023-05-14 11:25:52 +01:00
Nick Craig-Wood
97be9015a4 union: implement missing methods
Implement these missing methods:

- CleanUp

And declare these ones unimplementable:

- UnWrap
- WrapFs
- SetWrapper
- UserInfo
- Disconnect
- PublicLink
- PutUnchecked
- MergeDirs
- OpenWriterAt
2023-05-14 11:22:57 +01:00
Nick Craig-Wood
487e4f09b3 combine: implement missing methods
Implement these missing methods:

- PublicLink
- PutUnchecked
- MergeDirs
- CleanUp
- OpenWriterAt

And declare these ones unimplementable:

- UnWrap
- WrapFs
- SetWrapper
- UserInfo
- Disconnect

Fixes #6999
2023-05-14 11:22:57 +01:00
Nick Craig-Wood
09a408664d fs: create Overlay feature flag to indicate backend wraps others
Set this automatically for any backend which implements UnWrap and
manually for combine and union which can't implement UnWrap but do
overlay other backends.
2023-05-14 11:22:57 +01:00
Nick Craig-Wood
43fa256d56 fs: add OverrideDirectory for overriding path of directory 2023-05-14 11:22:57 +01:00
wiserain
6859c04772 pikpak: add validity check when using a media link
Before this change, the Pikpak backend would always download
the first media item whenever possible, regardless of whether
or not it was the original contents.

Now we check the validity of a media link using the `fid`
parameter in the link URL.

Fixes #6992
2023-05-13 03:41:59 +09:00
dependabot[bot]
38a0539096 build(deps): bump github.com/cloudflare/circl from 1.1.0 to 1.3.3
Bumps [github.com/cloudflare/circl](https://github.com/cloudflare/circl) from 1.1.0 to 1.3.3.
- [Release notes](https://github.com/cloudflare/circl/releases)
- [Commits](https://github.com/cloudflare/circl/compare/v1.1.0...v1.3.3)

---
updated-dependencies:
- dependency-name: github.com/cloudflare/circl
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-05-12 14:56:45 +01:00
Nick Craig-Wood
2cd85813b4 sftp: don't check remote points to a file if it ends with /
This avoids calling stat on the root directory, which saves a call and
which some servers don't like.

See: https://forum.rclone.org/t/stat-failed-error-on-sftp/38045
2023-05-11 07:58:20 +01:00
Nick Craig-Wood
e6e6069ecf sftp: don't stat directories before listing them
Before this change we ran stat on the directory to see if it existed.

Not only is this inefficient it isn't allowed by some SFTP servers.

See: https://forum.rclone.org/t/stat-failed-error-on-sftp/38045
2023-05-10 15:07:21 +01:00
Nick Craig-Wood
fcf47a8393 pikpak: set the NoMultiThreading feature flag to disable multi-thread copy
Before this change the pikpak backend changed the global
--multi-thread-streams flag which wasn't desirable.

Now the machinery is in place to use the NoMultiThreading feature flag
instead.

Fixes #6915
2023-05-09 17:46:19 +01:00
Nick Craig-Wood
46a323ae14 operations: Don't use multi-thread copy if the backend doesn't support it #6915 2023-05-09 17:40:58 +01:00
Nick Craig-Wood
72be80ddca fs: add new backend feature NoMultiThreading
This should be set for backends which can't support simultaneous reads
from different offsets in a single file.
2023-05-09 17:40:11 +01:00
Nick Craig-Wood
a9e7e7bcc2 ftp: Fix "501 Not a valid pathname." errors when creating directories
Some servers return a 501 error when using MLST on a non-existing
directory. This patch allows it.

I don't think this is correct usage according to the RFC, but the RFC
doesn't explicitly state which error code should be returned for
file/directory not found.
2023-05-09 17:27:35 +01:00
Nick Craig-Wood
925c4382e2 ftp: fix "unsupported LIST line" errors on startup
Before this fix a blank line in the MLST output from the FTP server
would cause the "unsupported LIST line" error.

This fixes the problem in the upstream fork.

Fixes #6879
2023-05-09 17:27:35 +01:00
Nick Craig-Wood
08c60c3091 Add Janne Hellsten to contributors 2023-05-09 17:27:35 +01:00
Janne Hellsten
5c594fea90 operations: implement uploads to temp name with --inplace to disable
When copying to a backend which has the PartialUploads feature flag
set and can Move files the file is copied into a temporary name first.
Once the copy is complete, the file is renamed to the real
destination.

This prevents other processes from seeing partially transferred copies
of files and prevents the old file from being overwritten until the new
one is complete.

This also adds --inplace flag that can be used to disable the partial
file copy/rename feature.

See #3770

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2023-05-09 16:28:10 +01:00
Janne Hellsten
cc01223535 fs: Implement PartialUploads feature flag
Implement a PartialUploads feature flag to mark backends for which
uploads are not atomic.

This is set for the following backends

- local
- ftp
- sftp

See #3770
2023-05-09 16:28:10 +01:00
Nick Craig-Wood
aaacfa51a0 sftp: fix move to allow overwriting existing files
Before this change rclone used a normal SFTP rename if present to
implement Move.

However the normal SFTP rename won't overwrite existing files.

This fixes it to either use the POSIX rename extension
("posix-rename@openssh.com") or to delete the source first before
renaming using the normal SFTP rename.

This isn't normally a problem as rclone always removes any existing
objects first, however to implement non --inplace operations we do
require overwriting an existing file.
2023-05-09 16:28:10 +01:00
Nick Craig-Wood
c18c66f167 fs: when creating new fs.OverrideRemotes don't layer overrides if not needed 2023-05-09 16:28:10 +01:00
Nick Craig-Wood
d6667d34e7 fs: fix String() method on fs.OverrideRemote
Before this fix it was returning the base object's string rather than
the overridden remote.
2023-05-09 16:28:10 +01:00
Nick Craig-Wood
e649cf4d50 uptobox: add --uptobox-private flag to make all uploaded files private
See: #6946
2023-05-08 17:50:50 +01:00
Nick Craig-Wood
f080ec437c azureblob: empty directory markers #3453 2023-05-07 12:47:09 +01:00
Nick Craig-Wood
4023eaebe0 gcs: fix directory marker code #3453
Use Update to upload the directory markers
2023-05-07 12:47:09 +01:00
Nick Craig-Wood
baf16a65f0 s3: fix directory marker code #3453
Use Update to upload the directory markers
2023-05-07 12:47:09 +01:00
Nick Craig-Wood
70fe2ac852 azureblob: fix azure blob uploads with multiple bits of metadata 2023-05-07 12:47:09 +01:00
Nick Craig-Wood
41cf7faea4 Add Andrei Smirnov to contributors 2023-05-07 12:47:09 +01:00
Andrei Smirnov
f226f2dfb1 s3: add petabox.io to s3 providers 2023-05-05 09:44:25 +01:00
Nick Craig-Wood
31caa019fa rc: fix output of Time values in options/get
Before this change these were output as `{}`; after this change they
are output as time strings like `"2022-03-26T17:48:19Z"` in standard
JavaScript format.
2023-05-04 15:04:11 +01:00
Nick Craig-Wood
0468375054 uptobox: ensure files and folders show the modtime configured by --default-time #6986 2023-05-04 15:03:11 +01:00
Nick Craig-Wood
6001f05a12 union: the root folder shows the modtime configured by --default-time #6986 2023-05-04 15:03:11 +01:00
Nick Craig-Wood
f7b87a8049 koofr: ensure folders show the modtime configured by --default-time #6986 2023-05-04 15:03:11 +01:00
Nick Craig-Wood
d379641021 http: ensure folders show the modtime configured by --default-time #6986 2023-05-04 15:03:11 +01:00
Nick Craig-Wood
84281c9089 dropbox: ensure folders show the modtime configured by --default-time #6986 2023-05-04 15:03:11 +01:00
Nick Craig-Wood
8e2dc069d2 fs: Add --default-time flag to control unknown modtime of files/dirs
Before this patch, files or directories with unknown modtime would
appear as the current date.

When mounted some systems look at modification dates of directories to
see if they change and having them change whenever they drop out of
the directory cache is not optimal.
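
A brief sketch (the remote and timestamp are arbitrary) of overriding the fallback modtime:

    rclone lsl remote:path --default-time 2001-01-01T00:00:00Z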

See #6986
2023-05-04 15:03:11 +01:00
Nick Craig-Wood
61d6f538b3 onedrive: add --onedrive-av-override flag to download files flagged as virus
This also produces a warning when rclone detects files have been
blocked because of virus content

    server reports this file is infected with a virus - use --onedrive-av-override to download anyway

Fixes #557
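
As a usage sketch (the file path is made up), the flag can be passed to download a file OneDrive has flagged:

    rclone copy --onedrive-av-override onedrive:quarantine/report.xlsx /tmp/
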
2023-05-03 15:21:30 +01:00
Nick Craig-Wood
65b2e378e0 drive: fix incorrect remote after Update on object
Before this change, when Object.Update was called in the drive
backend, it overwrote the remote with that of the object info.

This is incorrect - the remote doesn't change on Update and this patch
fixes that and introduces a new test to make sure it is correct for
all backends.

This was noticed when doing Update of objects in a nested combine
backend.

See: https://forum.rclone.org/t/rclone-runtime-goroutine-stack-exceeds-1000000000-byte-limit/37912
2023-05-03 13:51:27 +01:00
Nick Craig-Wood
dea6bdf3df combine: fix goroutine stack overflow on bad object
If the Remote() call failed to do its path adjustment, then it would
recursively call Remote() as part of logging the failure and cause a
stack overflow.

This fixes it by logging the underlying object instead.

See: https://forum.rclone.org/t/rclone-runtime-goroutine-stack-exceeds-1000000000-byte-limit/37912
2023-05-03 13:51:27 +01:00
Nick Craig-Wood
27eb8c7f45 config: stop config create making invalid config files
If config create was passed a parameter with an embedded \n it wrote
it straight to the config file which made it invalid and caused a
fatal error reloading it.

This stops keys and values with \r and \n being added to the config
file.

See: https://forum.rclone.org/t/how-to-control-bad-remote-creation-which-takes-rclone-down/37856
2023-05-03 11:40:30 +01:00
Nick Craig-Wood
1607344613 Add Adam K to contributors 2023-05-03 11:40:30 +01:00
Adam K
5f138dd822 dropbox: syncing documentation with source for dropbox default batch_timeout - fixes #6984 2023-05-02 17:04:32 +01:00
Anagh Kumar Baranwal
2520c05c4b mount2: disable xattrs
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-04-30 17:56:47 +01:00
Anagh Kumar Baranwal
f7f5e87632 mount2: fixed statfs
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-04-30 17:56:47 +01:00
Anagh Kumar Baranwal
a7e6806f26 mount2: updated go-fuse version
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-04-30 17:56:47 +01:00
Anagh Kumar Baranwal
d0eb884262 mount: removed unnecessary byte slice allocation for reads
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-04-30 17:54:30 +01:00
WeidiDeng
ae6874170f webdav: set modtime using propset for owncloud and nextcloud 2023-04-28 17:38:49 +01:00
Nick Craig-Wood
f5bab284c3 s3: fix missing "tier" metadata
Before this change if the storage class wasn't set on the object, we
didn't set the "tier" metadata.

This made it impossible to filter on tier using the metadata filters.

This returns the "tier" metadata as STANDARD if the storage class
isn't set on the object.

See: https://forum.rclone.org/t/copy-from-s3-to-another-s3-filter-by-storage-class/37861
2023-04-28 14:33:01 +01:00
Nick Craig-Wood
c75dfa6436 Add Jānis Bebrītis to contributors 2023-04-28 14:33:01 +01:00
Nick Craig-Wood
56eb82bdfc Add Tobias Gion to contributors 2023-04-28 14:33:01 +01:00
Nick Craig-Wood
066e00b470 gcs: empty directory markers #3453
- Report correct feature flag
- Fix test failures due to that
- don't output the root directory marker
- Don't create the directory marker if it is the bucket or root
- Create directories when uploading files
2023-04-28 14:31:05 +01:00
Jānis Bebrītis
e0c445d36e gcs: empty directory markers - #3453 2023-04-28 14:31:05 +01:00
Nick Craig-Wood
74652bf318 s3: empty directory markers further work #3453
- Report correct feature flag
- Fix test failures due to that
- don't output the root directory marker
- Don't create the directory marker if it is the bucket or root
- Create directories when uploading files
2023-04-28 14:31:05 +01:00
Jānis Bebrītis
b6a95c70e9 s3: empty directory markers - #3453 2023-04-28 14:31:05 +01:00
Nick Craig-Wood
aca7d0fd22 s3: fix potential crash in integration tests 2023-04-28 14:31:05 +01:00
Nick Craig-Wood
12761b3058 fstests: make integration tests work with connection strings in remotes 2023-04-28 14:31:05 +01:00
Nick Craig-Wood
3567a47258 fs: make ConfigString properly reverse suffixed file systems
Before this change we renamed file systems with overridden config with
{suffix}.

However this meant that ConfigString produced a value which wouldn't
re-create the file system.

This uses an internal hash to keep note of what config goes which
which {suffix} in order to remake the config properly.
2023-04-28 14:31:05 +01:00
Nick Craig-Wood
6b670bd439 mockfs: make it so it can be registered as an Fs 2023-04-28 14:31:05 +01:00
Nick Craig-Wood
335ca6d572 lsjson: make --stat more efficient
Don't look for a file if the remote ends with /

This also makes it less likely to find a directory marker in bucket
based file systems.
2023-04-28 14:31:05 +01:00
Tobias Gion
c4a9e480c9 ftp: lower log message priority when SetModTime is not supported to debug
See: https://forum.rclone.org/t/ftp-fritz-box-setmodtime-is-not-supported/37781
2023-04-25 16:31:42 +02:00
Nick Craig-Wood
232d304c13 drive: fix trailing slash mis-identification of folder as file
Before this change, drive would mistakenly identify a folder with a
trailing slash as a file when passed to NewObject.

This was picked up by the integration tests
2023-04-25 12:10:15 +01:00
Nick Craig-Wood
44ac79e357 Add dlitster to contributors 2023-04-25 12:10:15 +01:00
dlitster
0487e465ee docs: s3: clarify that X-Amz-Meta-Md5chksum is really a base64-encoded hex 2023-04-25 11:39:36 +01:00
Nick Craig-Wood
bb6cfe109d crypt: fix reading 0 length files
In an earlier patch

d5afcf9e34 crypt: try not to return "unexpected EOF" error

This introduced a bug for 0 length files, which this change fixes. The
bug only manifests if the io.Reader returns data together with EOF,
which not all readers do.

This was failing in the integration tests.
2023-04-24 16:54:40 +01:00
WeidiDeng
864eb89a67 webdav: fix server side copy/move not overwriting - fixes #6964 2023-04-24 14:35:42 +01:00
Nick Craig-Wood
4471e6f258 selfupdate: obey --no-check-certificate flag
This patch makes sure we use our own HTTP transport when fetching the
current rclone version.

This allows it to use --no-check-certificate (and any other features
of our own transport).

See: https://forum.rclone.org/t/rclone-selfupdate-no-check-certificate-flag-not-work/37501
2023-04-24 12:26:01 +01:00
Nick Craig-Wood
e82db0b7d5 vfs: fix potential data race - Fixes #6962
This fixes a data race that was found by static analysis.
2023-04-24 12:17:03 +01:00
Nick Craig-Wood
72e624c5e4 serve dlna: fix potential data race #6962
This fixes a data race that was found by static analysis.
2023-04-24 12:17:03 +01:00
Nick Craig-Wood
6092fa57c3 Add Loren Gordon to contributors 2023-04-24 12:17:03 +01:00
Loren Gordon
3e15a594b7 cat: adds --separator option to cat command
When using `rclone cat` to print the contents of several files, the
user may want to inject some separator between the files, such as a
comma or a newline. This patch adds a `--separator` option to the `cat`
command to make that possible. The default value remains an empty
string, `""`, maintaining the prior behavior of `rclone cat`.
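
For instance, a sketch that joins several files with commas (the remote path and filter are illustrative):

    rclone cat remote:logs --include "*.csv" --separator ","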

Closes #6968
2023-04-24 12:01:53 +01:00
Nick Craig-Wood
db8c007983 swift: ignore 404 error when deleting an object
See: https://forum.rclone.org/t/rclone-should-optionally-ignore-404-for-delete/37592
2023-04-22 10:49:10 +01:00
dependabot[bot]
5836da14c2 build(deps): bump github.com/aws/aws-sdk-go from 1.44.236 to 1.44.246
Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.236 to 1.44.246.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.236...v1.44.246)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-20 18:03:27 +01:00
dependabot[bot]
8ed07d11a0 build(deps): bump github.com/klauspost/compress from 1.16.3 to 1.16.5
Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.16.3 to 1.16.5.
- [Release notes](https://github.com/klauspost/compress/releases)
- [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml)
- [Commits](https://github.com/klauspost/compress/compare/v1.16.3...v1.16.5)

---
updated-dependencies:
- dependency-name: github.com/klauspost/compress
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-20 18:03:18 +01:00
dependabot[bot]
1f2ee44c20 build(deps): bump golang.org/x/term from 0.6.0 to 0.7.0
Bumps [golang.org/x/term](https://github.com/golang/term) from 0.6.0 to 0.7.0.
- [Release notes](https://github.com/golang/term/releases)
- [Commits](https://github.com/golang/term/compare/v0.6.0...v0.7.0)

---
updated-dependencies:
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-20 18:02:43 +01:00
Nick Craig-Wood
32798dca25 build: remove Go updates from dependabot as it is too noisy 2023-04-20 17:58:10 +01:00
Nick Craig-Wood
075f98551f Add jladbrook to contributors 2023-04-20 17:58:10 +01:00
Nick Craig-Wood
963ab220f6 Add Brian Starkey to contributors 2023-04-20 17:58:10 +01:00
jladbrook
281a007b1a crypt: add suffix option to set a custom suffix for encrypted files - fixes #6392 2023-04-20 17:28:13 +01:00
Brian Starkey
589b7b4873 s3: update Scaleway storage classes
There are now 3 classes:
 * "STANDARD" - Multi-AZ, all regions
 * "ONEZONE_IA" - Single-AZ, FR-PAR only
 * "GLACIER" - Archive, FR-PAR and NL-AMS only
2023-04-19 17:20:30 +01:00
Nick Craig-Wood
04d2781fda fichier: add cdn option to use CDN for download - Fixes #6943 2023-04-18 17:35:21 +01:00
Nick Craig-Wood
5b95fd9588 Add WeidiDeng to contributors 2023-04-18 17:35:21 +01:00
Nick Craig-Wood
a42643101e Add Damo to contributors 2023-04-18 17:35:21 +01:00
Nick Craig-Wood
bcca67efd5 Add Rintze Zelle to contributors 2023-04-18 17:35:21 +01:00
WeidiDeng
7771aaacf6 vfs: fix writing to a read only directory creating spurious directory entries
Before this fix, when a write to a read only directory failed, rclone
would leave spurious directory entries in the directory.

This confuses `rclone serve webdav` into giving this error

    http: superfluous response.WriteHeader

This fixes the VFS layer to remove any directory entries where the
file creation did not succeed.

Fixes #5702
2023-04-18 17:33:04 +01:00
Damo
fda06fc17d docs: mount: add guidance for macFUSE installed via macports 2023-04-18 15:28:20 +01:00
Rintze Zelle
2faa4758e4 docs: azureblob: typo fix in "azureblob-account" command 2023-04-18 12:48:55 +01:00
Nick Craig-Wood
9a9ef040e3 vfs: fix reload: failed to add virtual dir entry: file does not exist
This error happened on a restart of the VFS with files to upload into
a new directory on a bucket based backend. Rclone was assuming that
directories created before the restart would still exist, but this is
a bad assumption for bucket based backends which don't really have
directories.

This change creates the pretend directory and thus the directory cache
if the parent directory does not exist when adding a virtual on a
backend which can't have empty directories.

See: https://forum.rclone.org/t/that-pesky-failed-to-reload-error-message/34527
2023-04-13 18:00:26 +01:00
Nick Craig-Wood
ca403dc90e vfs: add MkdirAll function to make a directory and all beneath 2023-04-13 18:00:22 +01:00
Nick Craig-Wood
451f4c2a8f onedrive: fix quickxorhash on 32 bit architectures
Before this fix quickxorhash would sometimes crash with an error like
this:

    panic: runtime error: slice bounds out of range [-1248:]

This was caused by an incorrect cast of a 64 bit number to a 32 bit
one on 32 bit platforms.

See: https://forum.rclone.org/t/panic-runtime-error-slice-bounds-out-of-range/37548
2023-04-13 15:14:46 +01:00
Nick Craig-Wood
5f6b105c3e Add Shyim to contributors 2023-04-13 15:14:46 +01:00
Nick Craig-Wood
d98837b7e6 Add Roel Arents to contributors 2023-04-13 15:14:46 +01:00
Shyim
99dd748fec serve restic: trigger systemd notify
Allow using Type=notify together with the serve restic API.
2023-04-10 15:22:54 +01:00
albertony
bdfe213c47 version: fix reported os/kernel version for windows 2023-04-10 12:02:26 +02:00
albertony
52fbb10b47 config: add more unit tests of save 2023-04-08 21:48:21 +02:00
albertony
6cb584f455 config: do not overwrite config file symbolic link - fixes #6754 2023-04-08 21:48:21 +02:00
albertony
ec8bbb8d30 config: do not remove/overwrite other files during config file save - fixes #3759 2023-04-08 21:48:21 +02:00
wiserain
fcdffab480 Add @wiserain as the pikpak backend maintainer 2023-04-06 17:45:54 +09:00
dependabot[bot]
aeb568c494 build(deps): bump github.com/aws/aws-sdk-go from 1.44.228 to 1.44.236
Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.228 to 1.44.236.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.228...v1.44.236)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-05 15:12:35 +01:00
dependabot[bot]
b07f575d07 build(deps): bump github.com/oracle/oci-go-sdk/v65
Bumps [github.com/oracle/oci-go-sdk/v65](https://github.com/oracle/oci-go-sdk) from 65.33.0 to 65.34.0.
- [Release notes](https://github.com/oracle/oci-go-sdk/releases)
- [Changelog](https://github.com/oracle/oci-go-sdk/blob/master/CHANGELOG.md)
- [Commits](https://github.com/oracle/oci-go-sdk/compare/v65.33.0...v65.34.0)

---
updated-dependencies:
- dependency-name: github.com/oracle/oci-go-sdk/v65
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-05 15:12:00 +01:00
dependabot[bot]
ebae647dfa build(deps): bump google.golang.org/api from 0.114.0 to 0.115.0
Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.114.0 to 0.115.0.
- [Release notes](https://github.com/googleapis/google-api-go-client/releases)
- [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.114.0...v0.115.0)

---
updated-dependencies:
- dependency-name: google.golang.org/api
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-05 15:11:18 +01:00
dependabot[bot]
6fd5b469bc build(deps): bump github.com/spf13/cobra from 1.6.1 to 1.7.0
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.6.1 to 1.7.0.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Commits](https://github.com/spf13/cobra/compare/v1.6.1...v1.7.0)

---
updated-dependencies:
- dependency-name: github.com/spf13/cobra
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-05 15:10:31 +01:00
dependabot[bot]
78e822dd79 build(deps): bump github.com/shirou/gopsutil/v3 from 3.23.2 to 3.23.3
Bumps [github.com/shirou/gopsutil/v3](https://github.com/shirou/gopsutil) from 3.23.2 to 3.23.3.
- [Release notes](https://github.com/shirou/gopsutil/releases)
- [Commits](https://github.com/shirou/gopsutil/compare/v3.23.2...v3.23.3)

---
updated-dependencies:
- dependency-name: github.com/shirou/gopsutil/v3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-04-05 15:09:55 +01:00
Roel Arents
a79db20bcd azureblob: send nil tier if empty string 2023-04-05 15:08:32 +01:00
Nick Craig-Wood
d67ef19f6e bisync: fix maxDelete parameter being ignored via the rc
See: https://forum.rclone.org/t/bisync-maxdelete-api/37215
2023-04-05 14:51:46 +01:00
Nick Craig-Wood
037a6bd1b0 crypt: recommend Dropbox for base32768 encoding
See: https://forum.rclone.org/t/base32768-filename-encoding-with-crypt-dropbox-remote/37375
2023-04-05 14:51:21 +01:00
Nick Craig-Wood
09b884aade Add wiserain to contributors 2023-04-05 14:51:21 +01:00
wiserain
243bcc9d07 pikpak: new backend
Fixes #6429
2023-04-04 16:33:48 +01:00
Nick Craig-Wood
64cf9ac911 local: fix /path/to/file.rclonelink when -l/--links is in use
Before this change using /path/to/file.rclonelink would not find the
file when using -l/--links.

This fixes the problem by doing another stat call if the file wasn't
found without the suffix if -l/--links is in use.

It will also give an error if you refer to a symlink without its
suffix, which will not work because the single-file filtering will be
using the file name without the .rclonelink suffix.

    need ".rclonelink" suffix to refer to symlink when using -l/--links

Before this change it would use the symlink as a directory which then
would fail when listed.

See: #6855
2023-04-04 10:22:00 +01:00
Nick Craig-Wood
15a3ec8fa1 local: fix filtering of symlinks with -l/--links flag
Before this fix, with the -l flag, the `.rclonelink` suffix wasn't
being added to the file names before filtering by name.

See #6855
2023-04-04 10:22:00 +01:00
Nick Craig-Wood
2b8af4d23f sync,copy,move: make sure we output a debug log on start of transfer
Before this change we weren't outputting a debug log at the start of a
transfer for files which existed on the source but not in the
destination.

This was different to the single file copy routine.
2023-04-04 09:41:36 +01:00
Nick Craig-Wood
5755e31ef0 Add Joel to contributors 2023-04-04 09:41:36 +01:00
Joel
f4c787ab74 sftp: add --sftp-host-key-algorithms to allow specifying SSH host key algorithms 2023-03-30 18:00:54 +01:00
Nick Craig-Wood
4d7b6e14b8 mount: clarify rclone mount error when installed via homebrew
See: https://forum.rclone.org/t/suggestion-for-error-message/37145
2023-03-29 13:59:27 +01:00
Nick Craig-Wood
9ea7d143dd Add Drew Parsons to contributors 2023-03-29 13:59:27 +01:00
Drew Parsons
927e721a25 docs: faq: clarify name resolver control
On Linux systems rclone builds with cgo but uses the internal Go
resolver for DNS by default.

This updates the FAQ to suggest using GODEBUG=netdns=cgo if there are
name resolution problems on Linux/BSD (rebuilding from source with
CGO_ENABLED if necessary), or trying GODEBUG=netdns=go on Windows/macOS.

See: #683
2023-03-28 15:24:37 +01:00
Nick Craig-Wood
bd46f01eb4 cmount: add --mount-case-insensitive to force the mount to be case insensitive 2023-03-27 16:17:49 +01:00
Nick Craig-Wood
5f4d7154c0 fs: fix tristate conversion to JSON 2023-03-27 16:17:49 +01:00
Nick Craig-Wood
bad8a01850 fs: allow boolean features to be enabled with --disable !Feature 2023-03-27 16:17:49 +01:00
Nick Craig-Wood
d808c3848a Add ed to contributors 2023-03-27 16:17:49 +01:00
ed
3f0bec2ee9 webdav: make pacer minSleep configurable
This adds the config argument --webdav-pacer-min-sleep which specifies
the http-request rate limit. Lowering this from the default 10ms can
greatly improve performance when synchronizing small files.

See: https://forum.rclone.org/t/webdav-with-persistent-connections/37024/10
2023-03-27 15:30:02 +02:00
Nick Craig-Wood
8fb9eb2fee sync: make --suffix-keep-extension preserve 2 part extensions like .tar.gz
If a file has two (or more) extensions and the second (or subsequent)
extension is recognised as a valid mime type, then the suffix will go
before that extension. So `file.tar.gz` would be backed up to
`file-2019-01-01.tar.gz` whereas `file.badextension.gz` would be
backed up to `file.badextension-2019-01-01.gz`

Fixes #6892
2023-03-27 14:24:21 +01:00
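A minimal sketch of the suffix placement described above, assuming mime.TypeByExtension is a reasonable stand-in for "recognised as a valid mime type" (illustrative only, not the sync package's actual code):

    package main

    import (
        "fmt"
        "mime"
        "path"
    )

    // suffixKeepExt inserts suffix before the file extension. If the
    // second-to-last extension maps to a known mime type (e.g. ".tar"
    // in "file.tar.gz") the suffix goes before that too.
    func suffixKeepExt(name, suffix string) string {
        ext := path.Ext(name)
        base := name[:len(name)-len(ext)]
        if ext2 := path.Ext(base); ext2 != "" && mime.TypeByExtension(ext2) != "" {
            ext = ext2 + ext
            base = name[:len(name)-len(ext)]
        }
        return base + suffix + ext
    }

    func main() {
        // file-2019-01-01.tar.gz on systems whose mime table knows ".tar"
        fmt.Println(suffixKeepExt("file.tar.gz", "-2019-01-01"))
        // file.badextension-2019-01-01.gz
        fmt.Println(suffixKeepExt("file.badextension.gz", "-2019-01-01"))
    }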
Nick Craig-Wood
01fa15a7d9 Add Aditya Basu to contributors 2023-03-27 14:24:21 +01:00
Nick Craig-Wood
6aaa5d7a75 Add jumbi77 to contributors 2023-03-27 14:24:21 +01:00
Nick Craig-Wood
b4d3411637 Add Juang, Yi-Lin to contributors 2023-03-27 14:24:21 +01:00
Nick Craig-Wood
01ddc8ca6c Add NickIAm to contributors 2023-03-27 14:24:21 +01:00
Nick Craig-Wood
16c1e7149e Add yuudi to contributors 2023-03-27 14:24:21 +01:00
albertony
0374ea2c79 Use jwt-go (golang-jwt) instead of deprecated jws (x/oauth2/jws)
golang.org/x/oauth2/jws is deprecated: this package is not intended for public use and
might be removed in the future. It exists for internal use only. Please switch to another
JWS package or copy this package into your own source tree.

github.com/golang-jwt/jwt/v4 seems to be a good alternative, and was already
an implicit dependency.
2023-03-26 19:20:50 +02:00
Nick Craig-Wood
2e2451f8ec lib/rest: fix problems re-using HTTP connections
Before this fix, it was noticed that the rclone webdav client did not
re-use HTTP connections when it should have been.

This turned out to be because rclone was not draining the HTTP bodies
when it was not expecting a response.

From the Go docs:

> If the returned error is nil, the Response will contain a non-nil
> Body which the user is expected to close. If the Body is not both
> read to EOF and closed, the Client's underlying RoundTripper
> (typically Transport) may not be able to re-use a persistent TCP
> connection to the server for a subsequent "keep-alive" request.

This fixes the problem by draining up to 10MB of data from an HTTP
response if the NoResponse flag is set, or at the end of a JSON or XML
response (which could have some whitespace on the end).

See: https://forum.rclone.org/t/webdav-with-persistent-connections/37024/
2023-03-26 17:19:48 +01:00
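The drain-then-close pattern the commit relies on looks roughly like this sketch (not the lib/rest code itself; the 10MB cap mirrors the limit mentioned above):

    package main

    import (
        "io"
        "net/http"
    )

    // drainAndClose discards up to limit bytes of the remaining body and
    // closes it, so the keep-alive connection can be reused by the Transport.
    func drainAndClose(body io.ReadCloser, limit int64) error {
        _, copyErr := io.Copy(io.Discard, io.LimitReader(body, limit))
        closeErr := body.Close()
        if copyErr != nil {
            return copyErr
        }
        return closeErr
    }

    func main() {
        resp, err := http.Get("https://example.com/")
        if err != nil {
            return
        }
        // We don't need the body here, but drain it anyway before closing.
        _ = drainAndClose(resp.Body, 10<<20)
    }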
albertony
bd1e3448b3 build: add exclude for misspell linter 2023-03-26 17:05:24 +02:00
albertony
20909fa294 build: enable misspell linter 2023-03-26 17:05:24 +02:00
albertony
c502e00c87 fs: fix infinite recursive call in pacer ModifyCalculator (fixes issue reported by the staticcheck linter) 2023-03-26 14:28:15 +02:00
albertony
9172c9b3dd crypt: reduce allocations
This changes crypt's use of sync.Pool: instead of storing slices
it now stores pointers to fixed-size arrays.

This issue was reported by staticcheck:

SA6002 - Storing non-pointer values in sync.Pool allocates memory

A sync.Pool is used to avoid unnecessary allocations and reduce
the amount of work the garbage collector has to do.

When passing a value that is not a pointer to a function that accepts
an interface, the value needs to be placed on the heap, which means
an additional allocation. Slices are a common thing to put in sync.Pools,
and they're structs with 3 fields (length, capacity, and a pointer to
an array). In order to avoid the extra allocation, one should store
a pointer to the slice instead.

See: https://staticcheck.io/docs/checks#SA6002
2023-03-26 14:28:15 +02:00
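The pattern SA6002 recommends, shown as a standalone sketch (the 64k buffer size is an assumption chosen for illustration, not the backend's real constant):

    package main

    import "sync"

    const blockSize = 64 * 1024

    // A pool of *[blockSize]byte: a pointer fits in an interface value
    // without an allocation, unlike a slice header passed by value.
    var blockPool = sync.Pool{
        New: func() any { return new([blockSize]byte) },
    }

    func main() {
        buf := blockPool.Get().(*[blockSize]byte)
        defer blockPool.Put(buf)

        // Use buf[:] wherever a []byte is needed.
        copy(buf[:], "some data")
    }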
albertony
78deab05f9 netstorage: ignore false positive from the staticcheck linter regarding header name not being canonical 2023-03-26 14:28:15 +02:00
albertony
6c9d377bbb vfs: ignore false positive from the unused linter 2023-03-26 14:28:15 +02:00
albertony
62ddc9b7f9 vfscache: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
448ae49fa4 webgui: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
5f3c276d0a zoho: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
9cea493f58 union: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
400d1a4468 swift: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
851ce0f4fe seafile: remove unused code for legacy API v2 (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
cc885bd39a hidrive: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
a1a8c21c70 dropbox: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
6ef4bd8c45 cache: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
fb316123ec azureblob: remove unused code (fixes issue reported by the unused linter) 2023-03-26 14:28:15 +02:00
albertony
270af61665 smb: code cleanup to avoid overwriting ctx before first use (fixes issue reported by the staticcheck linter) 2023-03-26 14:28:15 +02:00
albertony
155f4f2e21 mount: replace deprecated bazil/fuse specific constants with syscall constants 2023-03-26 14:28:15 +02:00
albertony
eaf593884b serve/ftp: use io.SeekEnd instead of os.SEEK_END (deprecated since Go 1.7) 2023-03-26 14:28:15 +02:00
albertony
930574c6e9 oracleobjectstorage: remove empty branch (fixes issue reported by the staticcheck linter) 2023-03-26 14:28:15 +02:00
albertony
c1586a9866 onedrive: report any list errors during cleanup 2023-03-26 14:28:15 +02:00
albertony
432eb74814 lib: avoid unnecessary use of fmt.Sprintf for string constant 2023-03-26 14:28:15 +02:00
albertony
92fb644fb6 test: use decompressed.String() instead of string(decompressed.Bytes()) 2023-03-26 14:28:15 +02:00
albertony
bb92af693a test: do not test deprecated and unused Dial and DialTLS functions on http Transport type 2023-03-26 14:28:15 +02:00
albertony
eb5fd07131 mount: error strings should not be capitalized 2023-03-26 14:28:15 +02:00
albertony
b2ce7c9aa6 hidrive: error strings should not be capitalized 2023-03-26 14:28:15 +02:00
albertony
d6b46e41dd build: replace deprecated linters deadcode, structcheck and varcheck with unused
The three linters deadcode, structcheck and varcheck we have been using are as of
golangci-lint version 1.49.0 (24 Aug 2022) marked as deprecated, and replaced by unused.

The linters staticcheck, gosimple, stylecheck and unused combined should correspond to
the checks performed by the stand-alone staticcheck tool, which is by default used for
linting in Visual Studio Code with the Go extension. We previously enabled the first
three, but skipped unused due to many reported issues.

See #6387 for more information.
2023-03-26 14:28:15 +02:00
albertony
254c6ef1dd build: add lint ignore comment required for golangci-staticcheck in addition to stand-alone staticcheck 2023-03-26 14:28:15 +02:00
albertony
547f943851 build: exclude known issues from the staticcheck linting in ci 2023-03-26 14:28:15 +02:00
albertony
8611c9f6f7 build: add staticcheck, gosimple and stylecheck linting to the build pipeline - fixes #6273
These combined should correspond to the checks performed by the stand-alone
staticcheck tool, which is by default used for linting in Visual Studio Code
with the Go extension. One exception is the unused checks, which the staticcheck
tool performs, but which we chose not to enable here in rclone due to many
reported occurrences.

See #6387 for more information.
2023-03-26 14:28:15 +02:00
Dimitri Papadopoulos
f6576237a4 fs: fix typos found by codespell 2023-03-25 12:51:04 +01:00
Dimitri Papadopoulos
207b64865e fstest: fix typo found by codespell 2023-03-25 09:34:10 +01:00
Dimitri Papadopoulos
9ee1b21ec2 vfs: fix typos found by codespell 2023-03-25 09:33:34 +01:00
Dimitri Papadopoulos
55a12bd639 backend: fix repeated words typos 2023-03-25 09:31:36 +01:00
dependabot[bot]
3b4a57dab9 build(deps): bump github.com/aws/aws-sdk-go from 1.44.227 to 1.44.228
Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.227 to 1.44.228.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.227...v1.44.228)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-24 20:49:50 +00:00
Dimitri Papadopoulos
afe158f878 docs: fix typos found by codespell 2023-03-24 20:49:00 +00:00
Dimitri Papadopoulos
722a3f32cc cmdtest: fix typos found by codespell 2023-03-24 20:44:25 +00:00
Dimitri Papadopoulos
9183618082 backend: fix typos found by codespell 2023-03-24 20:42:45 +00:00
Dimitri Papadopoulos
18ebca3979 lib: fix typos found by codespell 2023-03-24 20:40:52 +00:00
Nick Craig-Wood
e84d2c9e5f docs: add info about # of parallel checks for rclone check/cryptcheck
The original commit 7dbf1ab66f put the changes in the auto
generated docs - this fixes that.
2023-03-24 12:43:45 +00:00
Aditya Basu
e98b61ceeb docs: update install with docker interactive use
* Install with docker: interactive use
* remove extra mount from command
* update listremotes
2023-03-24 11:42:58 +00:00
albertony
19f9fca2f6 docs: document how the configuration file is written, and that an .old file will be deleted 2023-03-24 11:40:34 +00:00
jumbi77
7dbf1ab66f docs: add info about # of parallel checks for rclone check/cryptcheck 2023-03-24 11:35:58 +00:00
Dimitri Papadopoulos
bfe272bf67 backend: fix typos found by codespell 2023-03-24 11:34:14 +00:00
Dimitri Papadopoulos
cce8936802 cmd: fix typos found by codespell 2023-03-24 11:32:59 +00:00
Juang, Yi-Lin
043bf3567d drive: update drive service account guide 2023-03-24 11:31:46 +00:00
NickIAm
1b2f2c0d69 docs: add section about --vfs-cache-max-age
This change adds a section to clarify how exactly the --vfs-cache-max-age flag affects caching
2023-03-24 11:28:34 +00:00
yuudi
4b376514a6 doc: Clarify the srcFs and dstFs when using local filesystem
Co-authored-by: yuudi <yuudi@users.noreply.github.com>
2023-03-24 11:25:39 +00:00
Peter Brunner
c27e6a89b0 drive: add env_auth to drive provider
This change provides the ability to pass `env_auth` as a parameter to
the drive provider. This enables the provider to pull IAM
credentials from the environment or instance metadata. Previously if no
auth method was given it would default to requesting oauth.
2023-03-24 11:11:21 +00:00
albertony
76c6e3b15c build: set Rclone as file description since it is shown as process name in task manager 2023-03-23 18:55:57 +01:00
Nick Craig-Wood
48ec00cc1a rc: fix missing --rc flags
In this commit we accidentally removed the global --rc flags.

0df7466d2b cmd/rcd: Fix command docs to include command specific prefix (#6675)

This re-instates them.
2023-03-23 12:05:31 +00:00
dependabot[bot]
866600a73b build(deps): bump github.com/aws/aws-sdk-go from 1.44.226 to 1.44.227
Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.226 to 1.44.227.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.226...v1.44.227)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-23 11:27:14 +00:00
Nick Craig-Wood
d8f4cd4d5f drive: fix change notify picking up files outside the root
Before this change, change notify would pick up files which were
shared with us as well as files within the drive.

When using an encrypted mount this caused errors like:

    ChangeNotify was unable to decrypt "Plain file name": illegal base32 data at input byte 5

The fix tells drive to restrict changes to the drive in use.

Fixes #6771
2023-03-22 16:24:07 +00:00
Nick Craig-Wood
d0810b602a crypt: add --crypt-pass-bad-blocks to allow corrupted file output 2023-03-22 16:23:37 +00:00
Nick Craig-Wood
d5afcf9e34 crypt: try not to return "unexpected EOF" error
Before this change the code wasn't properly taking into account the
io.ErrUnexpectedEOF error that io.ReadFull can return. Sometimes
that error was being returned instead of a more specific and useful
error.

To fix this, io.ReadFull was replaced with the simpler
readers.ReadFill which is much easier to use correctly.
2023-03-22 16:23:37 +00:00
Nick Craig-Wood
07c4d95f38 crypt: fix tests assert.Error which should have been assert.EqualError 2023-03-22 16:23:37 +00:00
Nick Craig-Wood
fd83071b6b rc: fix operations/stat with trailing /
Before this change using operations/stat with a remote pointing to a
dir with a trailing / would return a null output rather than the
correct info.

This was because the directory was not found with a trailing slash in
the directory listing.

Fixes #6817
2023-03-22 16:22:45 +00:00
Nick Craig-Wood
e042d9089f fs: Fix interaction between --progress and --interactive
Before this change if both --progress and --interactive were set then
the screen display could become muddled.

This change makes --progress and --interactive use the same lock so
while rclone is asking for interactive questions, the progress will be
paused.

Fixes #6755
2023-03-22 16:18:41 +00:00
Nick Craig-Wood
cdfa0beafb lib/atexit: ensure OnError only calls cancel function once
Before this change the cancelFunc could be called twice, once while
handling the interrupt (CTRL-C) and once while unwinding the stack if
the function happened to finish.

This change ensures the cancelFunc is only called once by wrapping it
in a sync.Once.
2023-03-22 12:50:58 +00:00
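A minimal sketch of the wrapping described (function names are illustrative, not the lib/atexit API):

    package main

    import (
        "context"
        "sync"
    )

    // onceCancel wraps cancel so that concurrent or repeated calls
    // (interrupt handler vs normal stack unwinding) only run it once.
    func onceCancel(cancel context.CancelFunc) context.CancelFunc {
        var once sync.Once
        return func() { once.Do(cancel) }
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        cancel = onceCancel(cancel)
        defer cancel() // safe even if a signal handler already called it
        _ = ctx
    }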
Nick Craig-Wood
ddb3b17e96 s3: fix hang on aborting multipart upload with iDrive e2
Apparently the abort multipart upload call doesn't return while
multipart uploads are in progress on iDrive e2.

This means that if we CTRL-C a multipart upload rclone hangs until
all the parts being uploaded have completed. However since rclone is
uploading multiple parts at once this doesn't happen until after the
entire file is uploaded.

This was fixed by cancelling the upload context which causes all the
uploads to stop instantly.
2023-03-22 12:50:58 +00:00
Nick Craig-Wood
32f71c97ea Add Zach Kipp to contributors 2023-03-22 12:50:58 +00:00
dependabot[bot]
53853116fb build(deps): bump github.com/aws/aws-sdk-go from 1.44.223 to 1.44.226
Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.223 to 1.44.226.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.223...v1.44.226)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-22 11:51:03 +00:00
dependabot[bot]
a887856998 build(deps): bump github.com/oracle/oci-go-sdk/v65
Bumps [github.com/oracle/oci-go-sdk/v65](https://github.com/oracle/oci-go-sdk) from 65.32.1 to 65.33.0.
- [Release notes](https://github.com/oracle/oci-go-sdk/releases)
- [Changelog](https://github.com/oracle/oci-go-sdk/blob/master/CHANGELOG.md)
- [Commits](https://github.com/oracle/oci-go-sdk/compare/v65.32.1...v65.33.0)

---
updated-dependencies:
- dependency-name: github.com/oracle/oci-go-sdk/v65
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-22 11:50:24 +00:00
Zach Kipp
0df7466d2b cmd/rcd: Fix command docs to include command specific prefix (#6675)
This change addresses two issues with commands that re-used
flags from common packages:

1) cobra.Command definitions did not include the command specific
   prefix in doc strings.
2) Command specific flag prefixes were added after generating
   command doc strings.
2023-03-22 11:47:35 +00:00
eNV25
23579e3b99 cmd/ncdu: refactor redraw handling 2023-03-21 16:41:22 +00:00
Nick Craig-Wood
3affba6fa6 build: remove duplicate linux/arm64 build 2023-03-21 16:25:46 +00:00
Nick Craig-Wood
542677d807 s3: fix --s3-versions on individual objects
Before this fix attempting to access an s3 versioned object by name in
a subdirectory of root would not find the object.

This fixes the problem and introduces an integration test.

See: https://forum.rclone.org/t/s3-versions-cant-retrieve-old-version/36900
2023-03-21 12:44:45 +00:00
Nick Craig-Wood
d481aa8613 Revert "s3: fix InvalidRequest copying to a locked bucket from a source with no MD5SUM"
This reverts commit e5a1bcb1ce.

This causes a lot of integration test failures so may need to be optional.
2023-03-21 11:43:43 +00:00
Nick Craig-Wood
15e633fa8b build: disable provenance in docker build
To attempt to fix this error:

buildx failed with: ERROR: failed to solve: missing provenance for owlcc15myb2dpmxrz6dl5bzqc
2023-03-20 18:09:54 +00:00
dependabot[bot]
732c24c624 build(deps): bump docker/build-push-action from 3 to 4
Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 3 to 4.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](https://github.com/docker/build-push-action/compare/v3...v4)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-20 16:21:22 +00:00
Nick Craig-Wood
75dfdbf211 ci: revert revive settings back to fix lint
The upstream revive repo changed the default settings for this linter.
We use this through golangci-lint.

This change meant lots of errors appearing all at once. We should
probably fix these in due course, but for the time being this disables
those settings.

See: https://github.com/mgechev/revive/pull/799
2023-03-20 15:26:21 +00:00
asdffdsazqqq
5f07113a4b docs: install: how to uninstall rclone via winget 2023-03-20 14:51:42 +00:00
Richard Tweed
6a380bcc67 build: fix dockerfile reference in beta image pipeline 2023-03-20 11:54:31 +00:00
dependabot[bot]
97276ce765 build(deps): bump google.golang.org/api from 0.112.0 to 0.114.0
Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.112.0 to 0.114.0.
- [Release notes](https://github.com/googleapis/google-api-go-client/releases)
- [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.112.0...v0.114.0)

---
updated-dependencies:
- dependency-name: google.golang.org/api
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-17 16:25:06 +00:00
dependabot[bot]
a23a7a807f build(deps): bump github.com/klauspost/compress from 1.16.0 to 1.16.3
Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.16.0 to 1.16.3.
- [Release notes](https://github.com/klauspost/compress/releases)
- [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml)
- [Commits](https://github.com/klauspost/compress/compare/v1.16.0...v1.16.3)

---
updated-dependencies:
- dependency-name: github.com/klauspost/compress
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-17 16:24:20 +00:00
dependabot[bot]
c6a4caaf7e build(deps): bump goftp.io/server
Bumps goftp.io/server from 0.4.2-0.20210615155358-d07a820aac35 to 1.0.0-rc1.

---
updated-dependencies:
- dependency-name: goftp.io/server
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-17 16:23:37 +00:00
dependabot[bot]
5574733dcb build(deps): bump github.com/oracle/oci-go-sdk/v65
Bumps [github.com/oracle/oci-go-sdk/v65](https://github.com/oracle/oci-go-sdk) from 65.32.0 to 65.32.1.
- [Release notes](https://github.com/oracle/oci-go-sdk/releases)
- [Changelog](https://github.com/oracle/oci-go-sdk/blob/master/CHANGELOG.md)
- [Commits](https://github.com/oracle/oci-go-sdk/compare/v65.32.0...v65.32.1)

---
updated-dependencies:
- dependency-name: github.com/oracle/oci-go-sdk/v65
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-17 16:22:54 +00:00
dependabot[bot]
49c21d0b6e build(deps): bump github.com/aws/aws-sdk-go from 1.44.218 to 1.44.223
Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.218 to 1.44.223.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.218...v1.44.223)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-17 16:22:08 +00:00
eNV25
0ea2ce3674 cmd/ncdu: fix screen corruption when logging
Before this change if logs were not redirected, logging would
corrupt the terminal screen.

This commit stores the logs (max ~100 lines) in an array and
prints them when the program exits.
2023-03-17 14:52:34 +00:00
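A rough sketch of the buffer-and-flush idea (hypothetical types, not ncdu's actual implementation): redirect the standard logger into memory while the TUI owns the screen, then print the captured lines on exit.

    package main

    import (
        "fmt"
        "log"
        "os"
        "strings"
        "sync"
    )

    // logBuffer collects log lines in memory instead of writing them
    // to the terminal while the TUI is drawing.
    type logBuffer struct {
        mu    sync.Mutex
        lines []string
        max   int
    }

    func (b *logBuffer) Write(p []byte) (int, error) {
        b.mu.Lock()
        defer b.mu.Unlock()
        if len(b.lines) < b.max {
            b.lines = append(b.lines, strings.TrimRight(string(p), "\n"))
        }
        return len(p), nil
    }

    func main() {
        buf := &logBuffer{max: 100}
        log.SetOutput(buf) // while the interactive screen is active

        log.Println("something happened")

        log.SetOutput(os.Stderr) // on exit, restore and flush
        fmt.Println(strings.Join(buf.lines, "\n"))
    }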
dependabot[bot]
3ddf824251 build(deps): bump actions/setup-go from 3 to 4
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3 to 4.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/v3...v4)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-17 14:41:49 +00:00
Nick Craig-Wood
68fdff3c27 build: ensure users with no secrets (dependabot) don't run android upload step 2023-03-17 14:04:46 +00:00
Nick Craig-Wood
c003485ae3 build: ensure users with no secrets (dependabot) don't run deploy step 2023-03-17 13:49:11 +00:00
Nick Craig-Wood
99d5080191 Add Richard Tweed to contributors 2023-03-17 13:49:11 +00:00
alankrit
2ad217eedd librclone: add example of using librclone with Go 2023-03-17 12:00:27 +00:00
albertony
a3eb7f1142 jottacloud: fix vfs writeback stuck in a failed upload loop with file versioning disabled
Avoid returning an error when the no_versions option is set and the remove fails

Fixes #6857
2023-03-17 11:54:43 +00:00
Richard Tweed
6d620b6d88 build: update docker beta build to latest actions and to push to ghcr
* Add ghcr option for docker images
* Update to use the upstream build actions
* Add ability to push beta images manually.
2023-03-17 11:54:01 +00:00
Arnav Singh
9f8357ada7 sftp: fix using key_use_agent and key_file together needing private key file
When using ssh-agent to hold multiple keys, it is common practice to configure
openssh to use a specific key by setting the corresponding public key as
the `IdentityFile`. This change makes a similar behavior possible in rclone
by having it parse the `key_file` config as the public key when
`key_use_agent` is `true`.

rclone already attempted this behavior before this change, but it assumed that
`key_file` is the private key and that the public key is specified in
`${key_file}.pub`. So for parity with the openssh behavior, this change makes
rclone first attempt to read the public key from `${key_file}.pub` as before
(for the sake of backward compatibility), then fall back to reading it from
`key_file`.

Fixes #6791
2023-03-17 11:44:19 +00:00
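A hedged sketch of the lookup order described above, reading `${key_file}.pub` first and falling back to `key_file`, using golang.org/x/crypto/ssh to parse an authorized_keys-style public key (illustrative, not the backend's code):

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/crypto/ssh"
    )

    // loadAgentPublicKey returns the public key used to pick the matching
    // key from ssh-agent, preferring keyFile+".pub" over keyFile itself.
    func loadAgentPublicKey(keyFile string) (ssh.PublicKey, error) {
        data, err := os.ReadFile(keyFile + ".pub")
        if err != nil {
            data, err = os.ReadFile(keyFile)
            if err != nil {
                return nil, fmt.Errorf("read public key: %w", err)
            }
        }
        pub, _, _, _, err := ssh.ParseAuthorizedKey(data)
        return pub, err
    }

    func main() {
        if pub, err := loadAgentPublicKey("/home/user/.ssh/id_ed25519"); err == nil {
            fmt.Println(ssh.FingerprintSHA256(pub))
        }
    }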
Nick Craig-Wood
e5a1bcb1ce s3: fix InvalidRequest copying to a locked bucket from a source with no MD5SUM
Before this change, we would upload files as single part uploads even
if the source MD5SUM was not available.

AWS won't let you upload a file to a locked bucket without some sort
of hash protection of the upload, which we don't have with no MD5SUM.

So we switch to multipart upload when the source does not have an
MD5SUM.

This means that if --s3-disable-checksum is set or we are copying from
a source with no MD5SUMs we will copy with multipart uploads.

This patch changes all uploads, not just those to locked buckets
because having no MD5SUM protection on uploads is undesirable.

Fixes #6846
2023-03-17 11:34:20 +00:00
Nick Craig-Wood
46484022b0 fs: add size to JSON logs when moving or copying an object #6849 2023-03-17 11:22:57 +00:00
Nick Craig-Wood
ab746ef891 Add Thibault Coupin to contributors 2023-03-17 11:22:57 +00:00
Paul
6241c1ae43 Add devnoname120 to contributors 2023-03-17 11:09:08 +00:00
Paul
0f8d3fe6a3 webdav: add support for chunked uploads - fix #3666
Co-authored-by: Thibault Coupin <thibault.coupin@gmail.com>
Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2023-03-17 11:09:08 +00:00
Paul
07afb9e700 webdav: add chunking helper file 2023-03-17 11:09:08 +00:00
Thibault Coupin
3165093feb fstests: add option to skip chunked upload 2023-03-17 11:09:08 +00:00
Paul
4af0c1d902 rest: add optional GetBody function for HTTP call 2023-03-17 11:09:08 +00:00
Nick Craig-Wood
82f9554474 docs: note that rcat will retry chunks when multipart uploading
See: https://forum.rclone.org/t/the-rclone-rcat-reliability-for-the-uploading-files-to-s3/36830
2023-03-17 10:52:21 +00:00
Nick Craig-Wood
d8d53b7aa0 Add Christopher Merry to contributors 2023-03-17 10:52:21 +00:00
Nick Craig-Wood
8c9048259a Add Arnavion to contributors 2023-03-17 10:52:21 +00:00
Christopher Merry
0361acbde4 googlecloudstorage: added gcs requester pays 2023-03-16 17:13:37 +00:00
Aaron Gokaslan
f5bf0a48f3 uptobox: fix improper regex 2023-03-16 17:12:27 +00:00
albertony
cec843dd8c build: run workflow even if tag/branch name contains slash 2023-03-16 17:07:07 +00:00
Anthony Pessy
54a9488e59 s3: add GCS to provider list 2023-03-16 14:24:21 +00:00
Arnavion
29fe0177bd webdav: add "fastmail" provider for Fastmail Files
This provider:

- supports the `X-OC-Mtime` header to set the mtime

- calculates SHA1 checksum server side and returns it as a `ME:sha1hex` prop

To differentiate the new hasMESHA1 quirk, the existing hasMD5 and hasSHA1
quirks for Owncloud have been renamed to hasOCMD5 and hasOCSHA1.

Fixes #6837
2023-03-16 14:20:29 +00:00
Nick Craig-Wood
0e134364ac Changelog updates from Version v1.62.2 2023-03-16 12:00:06 +00:00
Lesmiscore
0d8350d95d ftp: fix 426 errors on downloads with vsftpd
Sometimes vsftpd returns a 426 error when closing the stream even when
all the data has been transferred successfully. This is some TLS
protocol mismatch.

Rclone has code to deal with this already, but the error returned from
Close was wrapped in a multierror so the detection didn't work.

This properly extracts `textproto.Error` from the errors returned by
`github.com/jlaffaye/ftp` in all cases.

See: https://forum.rclone.org/t/vsftpd-vs-rclone-part-2/36774
2023-03-15 18:09:29 +00:00
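The detection hinges on unwrapping back to a `textproto.Error`; a minimal illustration with the standard errors package (not the actual backend code, which also has to look inside multierrors):

    package main

    import (
        "errors"
        "fmt"
        "net/textproto"
    )

    // is426 reports whether err wraps an FTP 426 protocol error.
    func is426(err error) bool {
        var protoErr *textproto.Error
        return errors.As(err, &protoErr) && protoErr.Code == 426
    }

    func main() {
        err := fmt.Errorf("closing data stream: %w",
            &textproto.Error{Code: 426, Msg: "Failure writing network stream."})
        fmt.Println(is426(err)) // true
    }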
asdffdsazqqq
497e373e31 docs: fix size documentation
change `Google Drive` to `Google Docs`
2023-03-15 16:21:37 +00:00
Nick Craig-Wood
ed8fea4aa5 docker volume plugin: add missing fuse3 dependency #6844 2023-03-15 15:57:53 +00:00
Nick Craig-Wood
4d7f75dd76 Changelog updates from Version v1.62.1 2023-03-15 14:53:21 +00:00
Nick Craig-Wood
53e757aea9 build: update release docs to be more careful with the tag 2023-03-15 14:53:21 +00:00
Nick Craig-Wood
f578896745 Set Github release to draft while uploading binaries 2023-03-15 14:53:21 +00:00
Nick Craig-Wood
13be03cb86 Add cycneuramus to contributors 2023-03-15 14:53:21 +00:00
cycneuramus
864e02409e docker: add missing fuse3 dependency - fixes #6844 2023-03-15 10:54:30 +00:00
Nick Craig-Wood
fccc779a15 Start v1.63.0-DEV development 2023-03-14 15:18:54 +00:00
Nick Craig-Wood
77c7077458 Version v1.62.0 2023-03-14 12:42:23 +00:00
Nick Craig-Wood
ffd4ab222c docs: add idrive e2 as a major sponsor 2023-03-14 12:37:34 +00:00
Nick Craig-Wood
676277e255 docs: move FUSE-T docs from auto generated file to source file
Docs committed in wrong place in

c0a5283416 docs: rclone mount on macOS with macFUSE and  FUSE-T
2023-03-14 12:37:34 +00:00
Justin Winokur
c0a5283416 docs: rclone mount on macOS with macFUSE and FUSE-T 2023-03-13 10:55:39 +00:00
Nick Craig-Wood
e405ca7733 vfs: make uploaded files retain modtime with non-modtime backends
Before this change if a file was uploaded to a backend which didn't
support modtimes, the time of the file read after the upload had
completed would change to the time the file was uploaded on the
backend.

When using `--vfs-cache-mode writes` or `full` this time would be
different by the `--vfs-write-back` delay which would cause
applications to think the file had been modified.

This change uses the last modification time read by the OS as a
virtual modtime for backends which don't support setting modtimes. It
does not change the modtime to the one actually uploaded.

This means that as long as the file remains in the directory cache it
will have the expected modtime.

See: https://forum.rclone.org/t/saving-files-causes-wrong-modified-time-to-be-set-for-a-few-seconds-on-webdav-mount-with-bitrix24/36451
2023-03-10 15:00:01 +00:00
Nick Craig-Wood
580d72f0f6 operations: skip --max-delete tests on chunker integration tests
The recent changes to remove race conditions from --max-delete have
made these tests fail on chunker with s3 because they do copy then
delete and the deletes are being counted in the --max-delete(-size)
counts.
2023-03-10 12:13:44 +00:00
Nick Craig-Wood
22daeaa6f3 build: update dependencies
This fixes the azureblob backend so it builds again after the SDK
changes.

This doesn't update bazil.org/fuse because it doesn't build on FreeBSD

https://github.com/bazil/fuse/issues/295
2023-03-10 11:15:07 +00:00
Nick Craig-Wood
ca9ad7935a Add dependabot[bot] to contributors 2023-03-10 11:15:07 +00:00
Nick Craig-Wood
dd6e229327 move: if --check-first and --order-by are set then delete with perfect ordering
If using rclone move and --check-first and --order-by then rclone uses
the transfer routine to delete files to ensure perfect ordering.

This will cause the transfer stats to have a larger than expected
number of items in it so we don't enable this by default.

Fixes #6033
2023-03-10 08:23:32 +00:00
dependabot[bot]
4edcd16f5f build(deps): bump github.com/gdamore/tcell/v2 from 2.5.4 to 2.6.0
Bumps [github.com/gdamore/tcell/v2](https://github.com/gdamore/tcell) from 2.5.4 to 2.6.0.
- [Release notes](https://github.com/gdamore/tcell/releases)
- [Changelog](https://github.com/gdamore/tcell/blob/main/CHANGESv2.md)
- [Commits](https://github.com/gdamore/tcell/compare/v2.5.4...v2.6.0)

---
updated-dependencies:
- dependency-name: github.com/gdamore/tcell/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:38:47 +00:00
dependabot[bot]
534e3acd06 build(deps): bump github.com/iguanesolutions/go-systemd/v5
Bumps [github.com/iguanesolutions/go-systemd/v5](https://github.com/iguanesolutions/go-systemd) from 5.1.0 to 5.1.1.
- [Release notes](https://github.com/iguanesolutions/go-systemd/releases)
- [Commits](https://github.com/iguanesolutions/go-systemd/compare/v5.1.0...v5.1.1)

---
updated-dependencies:
- dependency-name: github.com/iguanesolutions/go-systemd/v5
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:38:04 +00:00
dependabot[bot]
cf75ddabd3 build(deps): bump golang.org/x/term from 0.5.0 to 0.6.0
Bumps [golang.org/x/term](https://github.com/golang/term) from 0.5.0 to 0.6.0.
- [Release notes](https://github.com/golang/term/releases)
- [Commits](https://github.com/golang/term/compare/v0.5.0...v0.6.0)

---
updated-dependencies:
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:37:23 +00:00
dependabot[bot]
6edcacf932 build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity
Bumps [github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://github.com/Azure/azure-sdk-for-go) from 1.2.0 to 1.2.2.
- [Release notes](https://github.com/Azure/azure-sdk-for-go/releases)
- [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/sdk/azidentity/v1.2.2/CHANGELOG.md)
- [Commits](https://github.com/Azure/azure-sdk-for-go/compare/v1.2...sdk/azidentity/v1.2.2)

---
updated-dependencies:
- dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azidentity
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:36:23 +00:00
dependabot[bot]
51506a7ccd build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azcore
Bumps [github.com/Azure/azure-sdk-for-go/sdk/azcore](https://github.com/Azure/azure-sdk-for-go) from 1.3.0 to 1.4.0.
- [Release notes](https://github.com/Azure/azure-sdk-for-go/releases)
- [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md)
- [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.3.0...sdk/azcore/v1.4.0)

---
updated-dependencies:
- dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azcore
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-09 18:33:40 +00:00
Ryan Caezar Itang
a50fd2a2a2 ci: add dependabot 2023-03-09 15:05:15 +00:00
Ryan Caezar Itang
efac7e18fb ci: add winget releaser workflow 2023-03-09 14:56:37 +00:00
Ryan Caezar Itang
02dd8eacea docs: add winget installation method 2023-03-09 14:56:37 +00:00
Nick Craig-Wood
e2984227bb fs: fix race conditions in --max-delete and --max-delete-size 2023-03-09 09:25:31 +00:00
Nick Craig-Wood
a35ee30d9f Add Leandro Sacchet to contributors 2023-03-09 09:25:31 +00:00
Leandro Sacchet
f689db4422 fs: Add --max-delete-size a delete size threshold
Fixes #3329
2023-03-08 17:12:31 +00:00
Nick Craig-Wood
fb4600f6f9 tree: fix display of files with illegal Windows file system names
Before this change, files with illegal Windows names (eg those
containing \) would not be displayed properly in tree.

This change adds the local encoding to the Windows file names so \
will be displayed as its wide unicode equivalent.

See: https://forum.rclone.org/t/error-with-build-v1-61-1-tree-command-panic-runtime-error-invalid-memory-address-or-nil-pointer-dereference/35922/
2023-03-07 15:30:11 +00:00
Nick Craig-Wood
1d0c75b0c2 ftp: retry errors when initiating downloads
This adds a retry loop to the Open() call in the FTP server so it can
retry failures opening files.

This should make downloading multipart files more reliable.

See: https://forum.rclone.org/t/downloads-fail-from-remote-server-error-426-failure-writing-network-stream/33839/
2023-03-07 12:34:20 +00:00
Nick Craig-Wood
2e435af4de ftp: retry 426 errors
Before this change we didn't retry 426 errors which are

    426 Connection closed; transfer aborted.

Or in this particular case

    426 Failure writing network stream.

These seem like they might be temporary so retry them.

See: https://forum.rclone.org/t/downloads-fail-from-remote-server-error-426-failure-writing-network-stream/33839/
2023-03-07 12:34:20 +00:00
Nick Craig-Wood
62a7765e57 smb: allow SPN (service principal name) to be configured
This enables connection to clusters.

Fixes #6515
2023-03-07 12:18:32 +00:00
Nick Craig-Wood
5ad942ed87 local: fix exclusion of dangling symlinks with -L/--copy-links
Before this fix, a dangling symlink was erroring the sync. It was
writing an ERROR log and causing rclone to exit with an error. The
List method wasn't returning an error though.

This fix makes sure that we don't log or report a global error on a
file/directory that has been excluded.

This feature was first implemented in:

a61d219bc local: fix -L/--copy-links with filters missing directories

Then fixed in:

8d1fff9a8 local: obey file filters in listing to fix errors on excluded files

This commit also adds test cases for the failure modes of those commits.

See #6376
2023-03-07 12:15:10 +00:00
Nick Craig-Wood
96609e3d6e ftp: revert to upstream github.com/jlaffaye/ftp now fix is merged
This reverts to using the upstream now the patch to fix hang when
using ExplicitTLS to certain servers is merged.

Fixes #6426
2023-03-07 12:12:07 +00:00
Nick Craig-Wood
28a8ebce5b vfs: fix rename of directory containing files to be uploaded
Before this change, if you renamed a directory containing files yet
to be uploaded and then deleted the directory, the files would still
be uploaded.

This fixes the problem by changing the directory path in all the file
objects in a directory when it is renamed. This wasn't necessary until
we introduced virtual files and directories which lived beyond the
directory flush mechanism.

Fixes #6809
2023-03-07 11:40:50 +00:00
Nick Craig-Wood
17854663de vfs: log size of File and Dir in tests for optimization 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
a4a6b5930a Add Peter Brunner to contributors 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
e9ae620844 Add Ryan Caezar Itang to contributors 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
e7cfb8ad8e Add Ninh Pham to contributors 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
786a1c212c Add Peter Brunner to contributors 2023-03-07 11:40:50 +00:00
Peter Brunner
03bc270730 gcs: fix google cloud storage provider help 2023-03-07 11:39:02 +00:00
Ryan Caezar Itang
7cef042231 docs: add scoop installation method 2023-03-07 11:36:07 +00:00
Ninh Pham
1155cc0d3f drive: make --drive-stop-on-upload-limit respond to storageQuotaExceeded
Before this change, if a "--drive-stop-on-upload-limit" was set,
rclone would not stop the upload if a "storageQuotaExceeded" error occurred.

This fix now checks for the "storageQuotaExceeded" error
and "--drive-stop-on-upload-limit", and fails fast.
2023-03-07 11:00:08 +00:00
Peter Brunner
13c3f67ab0 gcs: add env_auth to pick up IAM credentials from env/instance
This change provides the ability to pass `env_auth` as a parameter to
the google cloud storage provider. This enables the provider to pull IAM
credentials from the environment or instance metadata. Previously if no
auth method was given it would default to requesting oauth.
2023-03-06 18:18:33 +00:00
Nick Craig-Wood
ab2cdd840f serve ftp: fix timestamps older than 1 year in listings
Fixes #6785
2023-03-06 15:59:56 +00:00
Nick Craig-Wood
143285e2b7 vfs: fix incorrect modtime on fs which don't support setting modtime
Before this change we were using the Precision literally to round the
precision of the mod times.

However fs.ModTimeNotSupported is 100y on backends which don't support
setting modtimes so rounding to 100y was producing very strange
results.

See: https://forum.rclone.org/t/saving-files-causes-wrong-modified-time-to-be-set-for-a-few-seconds-on-webdav-mount-with-bitrix24/36451/
2023-03-06 10:54:21 +00:00
Nick Craig-Wood
19e8c8d42a s3: make purge remove directory markers too
See: https://forum.rclone.org/t/cannot-purge-aws-s3/36169/
2023-03-03 15:51:00 +00:00
Nick Craig-Wood
de9c4a3611 s3: use bucket.Join instead of path.Join to preserve paths
Before this change, path.Join would remove the trailing / from objects
which had them. The simplified bucket.Join does not.
2023-03-03 15:51:00 +00:00
Nick Craig-Wood
d7ad13d929 bucket: add Join function for a simplified path.Join 2023-03-03 15:51:00 +00:00
albertony
f9d50f677d lib/terminal: enable windows console virtual terminal sequences processing (ANSI/VT100 colors)
This ensures the virtual terminal processing mode is enabled on the rclone process
for Windows 10 consoles (by using Windows Console API functions GetConsoleMode/SetConsoleMode
and flag ENABLE_VIRTUAL_TERMINAL_PROCESSING), which adds native support for ANSI/VT100
escape sequences. This mode is the default in many cases, e.g. when using the Windows
Terminal application, but in other cases it is not, and the default can also be
controlled with a registry setting (see below), so configuring it on the process
seems to be the only reliable way of ensuring it is enabled when supported.

[HKEY_CURRENT_USER\Console]
"VirtualTerminalLevel"=dword:00000001
2023-03-03 12:37:01 +01:00
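For reference, enabling the flag from Go looks roughly like this Windows-only sketch using golang.org/x/sys/windows (not lib/terminal's exact code):

    //go:build windows

    package main

    import "golang.org/x/sys/windows"

    // enableVT turns on ANSI/VT100 escape sequence processing for stdout.
    func enableVT() error {
        h, err := windows.GetStdHandle(windows.STD_OUTPUT_HANDLE)
        if err != nil {
            return err
        }
        var mode uint32
        if err := windows.GetConsoleMode(h, &mode); err != nil {
            return err
        }
        return windows.SetConsoleMode(h, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
    }

    func main() {
        _ = enableVT()
    }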
albertony
3641993fab tree: fix colored output on windows
Since rclone version 1.61.0 the tree command uses ANSI color sequences in output by
default, but this led to issues in Windows terminals that were not handling them (#6668).

This commit ensures the tree command uses the terminal package for output. It relies on
go-colorable to properly handle ANSI color sequences: If stdout is connected to a terminal
the escape sequences are decoded and the text is written with color formatting using
the Windows Console API. If stdout is not connected to a terminal, e.g. redirected to a file,
the escape sequences are stripped off. The tree command has its own method for writing
directly to a file, specified with flag --output, and then the output is not passed
through the terminal package and must therefore be written without ansi codes.
2023-03-03 12:37:01 +01:00
Nick Craig-Wood
93d3ae04c7 deletefile: return error code 4 if file does not exist
Before this change `rclone deletefile` would return error code 1 if
the file it was trying to delete does not exist.

Rclone can't actually tell at this point whether the file doesn't
exist or whether what you tried to delete is a directory, but it seems
more logical to return error code 4 "object not found" here.

See: https://forum.rclone.org/t/rclone-deletefile-cmd-return-exit-code-1-when-file-not-found-in-remote-why-1-and-not-exit-code-4/
2023-03-03 09:51:23 +00:00
Nick Craig-Wood
e25e9fbf22 Add NodudeWasTaken to contributors 2023-03-03 09:51:23 +00:00
NodudeWasTaken
fe26d6116d mega: add --mega-use-https flag
Some ISPs throttle HTTP which MEGA uses by default, so some users may find using HTTPS beneficial.
2023-03-02 20:28:10 +00:00
Fred
06e1e18793 seafile: fix for flaky tests #6799 2023-03-02 20:03:25 +00:00
Nick Craig-Wood
23d17b76be onedrive: default onedrive personal to QuickXorHash
Before this change the hash used for Onedrive Personal was SHA1. From
July 2023 Microsoft is phasing out SHA1 hashes in favour of
QuickXorHash in Onedrive Personal. Onedrive Business and Sharepoint
remain using QuickXorHash as before.

This choice can be changed using the --onedrive-hash-type flag (and
config option) so that SHA1 can be selected while it is still
available in the transition period.

See: https://forum.rclone.org/t/microsoft-is-switching-onedrive-personal-to-quickxorhash-from-sha1/36296/
2023-03-02 19:32:35 +00:00
Nick Craig-Wood
dfe4e78a77 onedrive: add --onedrive-hash-type to change the hash in use
In preparation for Microsoft removing the SHA1 hash on OneDrive
Personal this allows the hash type to be set on OneDrive.

See: https://forum.rclone.org/t/microsoft-is-switching-onedrive-personal-to-quickxorhash-from-sha1/36296/
2023-03-02 19:32:35 +00:00
Nick Craig-Wood
59e7982040 s3: add --s3-sts-endpoint to specify STS endpoint
See: https://forum.rclone.org/t/s3-profile-failing-when-explicit-s3-endpoint-is-present/36063/
2023-03-02 09:56:09 +00:00
Nick Craig-Wood
c6b0587dc0 s3: fix AWS STS failing if --s3-endpoint is set
Before this change if an --s3-profile was set which used AWS STS (eg
to assume a role) and --s3-endpoint was set then rclone would use the
value from --s3-endpoint to contact the STS server which did not work.

This fix implements an endpoint resolver which only overrides the "s3"
service if --s3-endpoint is set. It sends the "sts" service (and any
other service) to the default resolver.

Fixes #6443
See: https://forum.rclone.org/t/s3-profile-failing-when-explicit-s3-endpoint-is-present/36063/
2023-03-01 16:24:40 +00:00
Nick Craig-Wood
9baa4d1c3c accounting: show checking tag if available even on transfers 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
a5390dbbeb sync,operations: fix correct concurrency: use --checkers unless transferring files
There were some places (e.g. deleting files) where we were using
--transfers instead of --checkers to control the concurrency when
files weren't being transferred.

These have been updated to use --checkers.
2023-03-01 11:10:38 +00:00
Nick Craig-Wood
019a486d5b accounting: Make checkers show what they are doing
Before this change, all types of checkers showed "checking" after the
file name despite the fact that not all of them were checking.

After this change, they can show

- checking
- deleting
- hashing
- importing
- listing
- merging
- moving
- renaming

See: https://forum.rclone.org/t/what-is-rclone-checking-during-a-purge/35931/
2023-03-01 11:10:38 +00:00
Nick Craig-Wood
34ce11d2be Add ToBeFree to contributors 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
88e8ede0aa Add Gerard Bosch to contributors 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
f6f250c507 Add logopk to contributors 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
2c45e901f0 Add Hunter Wittenborn to contributors 2023-03-01 11:10:38 +00:00
ToBeFree
9e1443799a docs: crypt: fix typo 2023-02-28 11:50:53 +00:00
Gerard Bosch
dd72aff98a docs: bisync: clarification of --resync 2023-02-28 11:47:28 +00:00
logopk
5039f9be48 docker: fix volume plugin not remounting volume on docker restart
docker volume plugin restoreState: skip fs option if empty

Fixes #6769
Co-authored-by: Peter Kreuser <logo@kreuser.name>
2023-02-28 11:29:07 +00:00
Hunter Wittenborn
56b582cdb9 authorize: add support for custom templates
This adds support for providing custom Go templates for use in the
`rclone authorize` command.

Fixes #6741
2023-02-24 15:08:38 +00:00
Aaron Gokaslan
745c0af571 all: Apply codeql fixes 2023-02-23 10:31:51 +00:00
Nick Craig-Wood
2dabbe83ac serve http: tests for --auth-proxy 2023-02-23 10:28:13 +00:00
Nick Craig-Wood
90561176fb Add Matthias Baur to contributors 2023-02-23 10:28:13 +00:00
Matthias Baur
a0b5d77427 serve http: support --auth-proxy 2023-02-22 14:55:24 +00:00
Manoj Ghosh
ce8b1cd861 oracle-object-storage: bring your own encryption keys 2023-02-21 14:45:02 +00:00
Manoj Ghosh
5bd6e3d1e9 fix vulnerabilities: upgrade golang.org/x/net@v0.5.0 to golang.org/x/net@v0.7.0 2023-02-21 10:11:16 +00:00
Nick Craig-Wood
d4d7a6a55e sftp: fix uploads being 65% slower than they should be with crypt
The block size for crypt is 64k + a few bytes. The default block size
for sftp is 32k. This means that the blocks for crypt get split over 3
sftp packets: two of 32k and one of a few bytes.

However due to a bug in pkg/sftp it was sending 32k instead of just a
few bytes, leading to the 65% slowdown.

This was fixed in the upstream library.

This bug probably affected transfers from over the network sources
also.

Fixes #6763
See: https://github.com/pkg/sftp/pull/537
2023-02-14 15:47:19 +00:00
Nick Craig-Wood
b3e0672535 s3: Check multipart upload ETag when --s3-no-head is in use
Before this change if --s3-no-head was in use rclone didn't check the
multipart upload ETag at all. However the ETag is returned in the
final POST request when completing the object.

This change uses that ETag from the final POST if --s3-no-head is in
use, otherwise it uses the ETag from a fresh HEAD request.

See: https://forum.rclone.org/t/in-some-cases-rclone-does-not-use-etag-to-verify-files/36095/
2023-02-14 12:04:28 +00:00
Nick Craig-Wood
a407437e92 Add Simmon Li (he/him) to contributors 2023-02-14 12:04:28 +00:00
Manoj Ghosh
0164a4e686 add more documentation around OCI authentication methods 2023-02-14 11:58:38 +00:00
Simmon Li (he/him)
b8ea79042c docs: drive: make clear "testing" apps have short token grant time 2023-02-13 14:30:20 +00:00
albertony
49a6533bc1 docs/mount: improve explanation of windows filesystem permissions 2023-02-10 23:21:33 +01:00
Nick Craig-Wood
21459f3cc0 tree: fix nil pointer exception on stat failure
This fixes the crash by updating the upstream.

See: https://forum.rclone.org/t/error-with-build-v1-61-1-tree-command-panic-runtime-error-invalid-memory-address-or-nil-pointer-dereference/35922/
See: https://github.com/a8m/tree/pull/21
2023-02-08 16:21:25 +00:00
albertony
04f7e52803 accounting: show human readable elapsed time when longer than a day - fixes #6748 2023-02-06 15:02:03 +01:00
Kaloyan Raev
25535e5eac storj: update satellite urls and labels
The docs and setup wizard still contained deprecated URLs and labels of
Storj satellites. This change updates them.
2023-02-06 13:18:15 +00:00
Nick Craig-Wood
c37b6b1a43 cache: fix lint error in latest golangci-lint 2023-02-06 10:44:40 +00:00
albertony
0328878e46 accounting: limit length of ETA string
No need to report hours, minutes, and even seconds when the
ETA is several years, e.g. "292y24w3d23h47m16s". Now only
reports the 3 most significant units, sacrificing precision,
e.g. "292y24w3d", "24w3d23h", "3d23h47m", "23h47m16s".

Fixes #6381
2023-02-04 17:29:08 +01:00
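A rough sketch of rendering at most the three most significant units (the unit set and year/week lengths here are simplifying assumptions, not the accounting package's exact formatting):

    package main

    import (
        "fmt"
        "time"
    )

    // etaString formats d using at most its 3 most significant units.
    func etaString(d time.Duration) string {
        units := []struct {
            name string
            dur  time.Duration
        }{
            {"y", 365 * 24 * time.Hour},
            {"w", 7 * 24 * time.Hour},
            {"d", 24 * time.Hour},
            {"h", time.Hour},
            {"m", time.Minute},
            {"s", time.Second},
        }
        out, shown := "", 0
        for _, u := range units {
            n := int64(d / u.dur)
            if n == 0 && shown == 0 {
                continue // skip leading zero units
            }
            out += fmt.Sprintf("%d%s", n, u.name)
            d -= time.Duration(n) * u.dur
            if shown++; shown == 3 {
                break
            }
        }
        if out == "" {
            return "0s"
        }
        return out
    }

    func main() {
        fmt.Println(etaString(23*time.Hour + 47*time.Minute + 16*time.Second)) // 23h47m16s
    }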
albertony
67132ecaec accounting: avoid negative ETA values for very slow speeds
Integer overflow would lead to ETA such as "-255y7w4h11m22s966ms",
as reported in #6381. Now the value will be clipped at the maximum
"292y24w3d23h47m16s", and it will be shown as infinity.
2023-02-04 17:29:08 +01:00
albertony
120cfcde70 install.sh: fix arm-v6 download 2023-02-04 13:32:26 +01:00
albertony
37db2a0e44 selfupdate: consider arm version 2023-02-04 13:32:26 +01:00
albertony
f92816899c version: report arm version 2023-02-04 13:32:26 +01:00
albertony
5386ffc8f2 build: correct building for ARMv5 and ARMv6
Explicitly set ARM version in GOARM build variable, to avoid relying
on some default value which differs when compiling natively and when
cross-compiling, and which is also incorrectly documented as being
6 when in reality it is 5.

Fix incorrect labelling of ARMv5 builds as ARMv6, and change
architecture of .rpm and .deb packages containing them to
match.

Add ARMv6 builds, to complement existing ARMv5 and ARMv7, and to
reduce disruption due to previous ARMv5 builds incorrectly being
identified as ARMv6, and to provide .rpm and .deb packages with the
same ARMv6 architectures as was previously also published
(then containing ARMv5 binaries).

See #6528

Background info:

https://github.com/golang/go/wiki/GoArm
https://go.dev/doc/install/source#environment
661e931dd1/src/cmd/dist/build.go (L140-L144)
661e931dd1/src/cmd/dist/util.go (L392-L422)
2023-02-04 13:32:26 +01:00
Anagh Kumar Baranwal
3898d534f3 build: update to go1.20
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-02-03 20:15:15 +00:00
Ole Frost
34333d9fa8 docs: added troubleshooting tips for Live Photos in OneDrive 2023-02-03 16:24:30 +00:00
Ole Frost
14e852ee9d s3: fix incorrect tier support for StorJ and IDrive when pointing at a file
Fixes #6734
2023-02-02 18:12:00 +00:00
albertony
37623732c6 build: avoid running workflow twice for pull requests with branch on main repo 2023-02-01 16:47:38 +01:00
Nick Craig-Wood
adbcc83fa5 filter: emit INFO message when can't work out directory filters
See: https://forum.rclone.org/t/rclone-scans-unwanted-folder/34437
2023-02-01 14:21:45 +00:00
Nick Craig-Wood
d4ea6632ca drive: note that --drive-acknowledge-abuse needs SA Manager permission
See: https://github.com/rclone/rclone/issues/2338#issuecomment-762820600
See: https://forum.rclone.org/t/bisync-already-add-drive-acknowledge-abuse-still-got-critical-error-cannotdownloadabusivefile/35604/
2023-02-01 12:11:46 +00:00
Nick Craig-Wood
21849fd0d9 webdav: fix interop with davrods server
The davrods server returns URLs with a double / in and the // confuses
rclone into thinking these files are in a directory called "".

The fix removes leading /s from the directory listing names.

See: https://forum.rclone.org/t/upload-to-webdav-does-not-check-if-files-already-exist/35756/
2023-02-01 12:00:25 +00:00
Nick Craig-Wood
ac20ee41ca Add happyxhw to contributors 2023-02-01 12:00:25 +00:00
happyxhw
d376fb1df2 smb: check smb connection is closed - fixes #6735 2023-02-01 08:25:25 +01:00
Nick Craig-Wood
8e63a08d7f docs: note that we have test Android builds 2023-01-31 14:11:50 +00:00
Nick Craig-Wood
3aee5b3c55 Add Simmon Li (he/him) to contributors 2023-01-31 14:11:50 +00:00
Nick Craig-Wood
0145d98314 Add LXY to contributors 2023-01-31 14:11:50 +00:00
Nick Craig-Wood
4c03c71a5f Add Bryan Kaplan to contributors 2023-01-31 14:11:50 +00:00
Simmon Li (he/him)
82e2801aae update drive.md
* Updates OAuth consent screen instructions to include adding scopes for backup purposes (create, edit and delete files).
* Updates instructions to keep app in testing mode (appropriate for most users). The previous instructions suggested this, but we don't need to "publish" the app at all in order to proceed with this step.
2023-01-27 15:25:17 +00:00
LXY
dc5d5de35c onedrive: improve speed of quickxorhash
This commit ports a fast C implementation from https://github.com/namazso/QuickXorHash

It uses new crypto/subtle code from go1.20 to avoid the use of unsafe.

Typical speedups are about 25x when using go1.20

    goos: linux
    goarch: amd64
    cpu: Intel(R) Celeron(R) N5105 @ 2.00GHz
    QuickXorHash-Before  2.49ms   422MB/s ±11%   100.00%
    QuickXorHash-Subtle  87.9µs 11932MB/s ± 5% +2730.83% + 42.17%

Co-Author: @namazso
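
The crypto/subtle piece referred to above is subtle.XORBytes, new in
go1.20. A minimal illustration (not the quickxorhash port itself) of
XORing a block into an accumulator without unsafe:

    package main

    import (
        "crypto/subtle"
        "fmt"
    )

    func main() {
        acc := make([]byte, 16)
        block := []byte("0123456789abcdef")
        // XOR block into acc in one call; returns the number of bytes XORed.
        n := subtle.XORBytes(acc, acc, block)
        fmt.Println(n, acc)
    }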
2023-01-26 11:50:12 +00:00
Bryan Kaplan
41cc4530f3 docs: Improve bisync check-access & check-filename
This commit documents my learnings after having encountered a failure
I reported in the rclone forum[0].

I may be a fool for having failed to understand the previous
documentation, but I am likely not the only fool to get snared by it.

This commit therefore adds details to clarify what the user must do in
order to allow `--check-access` to succeed.

While at it, I've also added some basic documentation for `--check-filename`.

[0]: https://forum.rclone.org/t/bisync-check-file-check-failed/35682
2023-01-26 11:10:01 +00:00
albertony
c5acb10151 fspath: allow the symbols at and plus in remote names - fixes #6710 2023-01-25 13:37:24 +01:00
Manoj Ghosh
8c8ee9905c oracleobjectstorage: speed up operations by using S3 pacer and setting minsleep to 10ms
Uploading 100 files of 1 MB each took 20 seconds before. With the above fix it takes around 2 seconds now.

This is a 10x improvement, in line with the pacer's sleep reduction from 100ms to 10ms.
2023-01-25 10:48:16 +00:00
albertony
e2afd00118 mount: avoid incorrect or premature overlap check on windows
See: #6234
2023-01-24 22:27:02 +01:00
albertony
5b82576dbf build: fix condition for manual workflow run
See #5275
2023-01-24 20:46:33 +01:00
albertony
b9d9f9edb0 docs: use --interactive instead of -i in examples to avoid confusion 2023-01-24 20:43:51 +01:00
Bryan Kaplan
c40b706186 docs: Fix link in bisync doc
This commit fixes the `#check-access` anchor link in the bisync.md document.

`#check-access-option` does not exist in bisync.md; `#check-access` does.
2023-01-24 09:16:43 +01:00
Nick Craig-Wood
351fc609b1 b2: fix uploading files bigger than 1TiB
Before this change when uploading files bigger than 1TiB, the chunk
calculator would work out that the chunk size needed to be bigger than
the default 100 MiB to fit within the 10,000 parts limit.

However the uploader was still using the memory pool for the old chunk
size and this caused errors like

    panic: runtime error: slice bounds out of range [:122683392] with capacity 100663296

The fix for this is to make a temporary pool with the larger chunk
size and use it during the upload of the large file.

See: https://forum.rclone.org/t/rclone-cannot-complete-upload-to-b2-restarts-upload-frequently/35617/
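
The chunk size arithmetic involved, as a rough sketch (constants and
rounding only, not the b2 backend's code):

    package main

    import "fmt"

    const (
        defaultChunkSize = 100 * 1024 * 1024 // 100 MiB
        maxParts         = 10000             // limit on parts per large file
    )

    // chunkSizeFor returns a chunk size big enough to upload size bytes in
    // at most maxParts parts, but never smaller than the default. The fix
    // above then allocates a temporary memory pool of this size.
    func chunkSizeFor(size int64) int64 {
        chunk := int64(defaultChunkSize)
        if needed := (size + maxParts - 1) / maxParts; needed > chunk {
            chunk = needed
        }
        return chunk
    }

    func main() {
        fmt.Println(chunkSizeFor(2 << 40)) // a 2 TiB file needs ~210 MiB chunks
    }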
2023-01-22 12:46:23 +00:00
Nick Craig-Wood
a6f6a9dcdf mount,mount2,cmount: fix --allow-non-empty #3562
Since version 3 of libfuse, the nonempty option no longer does anything
and its default is to allow mounting over non-empty directories, like
normal mount does.

Some versions of libfuse give an error when using `--allow-non-empty`
which is annoying for the user.

We now do this check ourselves so we no longer need to pass the option
to libfuse.

Fixes #3562
2023-01-20 15:39:54 +00:00
Nick Craig-Wood
267a09001d mount: fix check for empty mount point on Linux #3562 2023-01-20 15:39:54 +00:00
Nick Craig-Wood
37db2abecd Add alankrit to contributors 2023-01-20 15:39:49 +00:00
albertony
0272d44192 mount: do not treat \\?\ prefixed paths as network share paths on windows
See: #6234
2023-01-20 15:40:03 +01:00
alankrit
6b17044f8e fs: Added multiple CA certificate support. 2023-01-17 12:16:11 +00:00
Nick Craig-Wood
844e8fb8bd lib/errors: add support for unwrapping go1.20 multi errors 2023-01-17 11:35:19 +00:00
Nick Craig-Wood
ca9182d6ae Add IMTheNachoMan to contributors 2023-01-17 11:35:19 +00:00
IMTheNachoMan
ec20c48523 googlephotos: fix grammar in docs (#6699) 2023-01-16 13:40:30 +01:00
Nick Craig-Wood
ec68b72387 lib/file: fix error message test after go1.20 upgrade 2023-01-16 11:19:16 +00:00
Nick Craig-Wood
2d1c2725e4 webdav: fix tests after go1.20 upgrade
Before this change we were sending webdav requests to the go http
FileServer. In go1.20 these (rightly) started returning errors which
caused the tests to fail.

The test has been changed to properly mock up an About query and
response so an end to end test of adding headers is possible.
2023-01-16 11:19:16 +00:00
Nick Craig-Wood
1680c5af8f build: update to go1.20rc3 and make go1.17 the minimum required version 2023-01-16 11:19:16 +00:00
Nick Craig-Wood
88c0d78639 build: update to fuse3 after bazil.org/fuse update 2023-01-16 11:19:16 +00:00
Nick Craig-Wood
559157cb58 azureblob: remove workarounds for SDK bugs after v0.6.1 update 2023-01-16 11:19:16 +00:00
Nick Craig-Wood
10bf8a769e build: update dependencies
This fixes the azureblob backend so it builds again after the SDK
changes.
2023-01-16 11:19:16 +00:00
Fred
f31ab6d178 seafile: renew library password - fixes #6662
Passwords for encrypted libraries are kept in memory in the server
and flushed after an hour.
This MR fixes an issue when the library password expires after 1 hour.
2023-01-15 16:26:29 +00:00
Kaloyan Raev
f08bb5bf66 storj: implement purge 2023-01-15 16:23:49 +00:00
Manoj Ghosh
e2886aaddf oracle-object-storage: expose the storage_tier option in config 2023-01-15 16:20:55 +00:00
albertony
71227986db docs: remove link to nonexistent uploadfile command - fixes #6693 2023-01-12 20:13:02 +01:00
Nick Craig-Wood
8c6ff1fa7e cmount: fix creating and renaming files on case insensitive backends
Before this fix, we told cgofuse/WinFSP that the backend was case
insensitive but didn't implement the Getpath backend function to
return the normalised case of a file.

Recently cgofuse started implementing case insensitive files properly
but since we hadn't implemented Getpath, the file names were taking
the default of all in UPPER CASE.

This patch implements Getpath for cgofuse which fixes the case
problems.

This problem came to light when we upgraded cgofuse and WinFSP (to
1.12) which had the code to implement Getpath.

Fixes #6682
2023-01-11 17:21:57 +00:00
Nick Craig-Wood
9d1b786a39 Add Kaloyan Raev to contributors 2023-01-11 17:21:57 +00:00
Nick Craig-Wood
8ee0e2efb1 Add piyushgarg to contributors 2023-01-11 17:21:57 +00:00
Alex Chen
d66f5e8db0 lib/oauthutil: handle fatal errors better
PR #6678
2023-01-12 00:50:14 +08:00
Ole Frost
02d6d28ec4 crypt: fix for unencrypted directory names on case insensitive remotes
rclone sync erroneously deleted folders renamed to a different case on
crypts where directory name encryption was disabled and the underlying
remote was case insensitive.

Example: Renaming the folder Test to tEST before a sync to a crypt having
remote=OneDrive:crypt and directory_name_encryption=false could result in
the folder and all its content being deleted. The following sync would
correctly create the tEST folder and upload all of the content.

Additional tests have revealed other potential issues when using
filename_encryption=off or directory_name_encryption=false on case
insensitive remotes. The documentation has been updated to warn about
potential problems when using these combinations.
2023-01-11 16:32:40 +00:00
Kaloyan Raev
1cafc12e8c storj: implement public link 2023-01-10 17:40:04 +00:00
piyushgarg
98fa93f6d1 webdav: Document Mapping/Accessing WebDAV shares on windows.
Fixes #6596

Co-authored-by: Piyush <piyushgarg80>
2022-12-30 11:22:46 +00:00
albertony
c6c67a29eb Add Marks Polakovs to contributors 2022-12-26 18:39:49 +01:00
Marks Polakovs
ad5395e953 backend/local: fix %!w(<nil>) in "failed to read directory" error 2022-12-26 18:37:32 +01:00
Nick Craig-Wood
1925ceaade Changelog updates from Version v1.61.1 2022-12-23 18:26:56 +00:00
Nick Craig-Wood
8aebf12797 docs: fix unescaped HTML 2022-12-23 16:53:43 +00:00
Nick Craig-Wood
ffeefe8a56 crypt: obey --ignore-checksum
Before this change the crypt backend would calculate and check upload
checksums regardless of the setting of --ignore-checksum.
2022-12-23 16:52:19 +00:00
Nick Craig-Wood
81ce5e4961 docs: correct RELEASE procedure for stable branch 2022-12-23 12:34:04 +00:00
Nick Craig-Wood
638058ef91 lib/http: shutdown all servers on exit to remove unix socket
Before this change only serve http was Shutting down its server which
was causing other servers such as serve restic to leave behind their
unix sockets.

This change moves the finalisation to lib/http so all servers have it
and removes it from serve http.

Fixes #6648
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
b1b62f70d3 serve webdav: fix running duplicate Serve call
Before this change we were starting the server twice for webdav which
is inefficient and causes problems at exit.
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
823d89af9a serve restic: don't serve via http if serving via --stdio
Before this change, we started the http listener even if --stdio was
supplied.

This also moves the log message so the user won't see the serving via
HTTP message unless they are really using that.

Fixes #6646
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
448fff9a04 serve restic: fix immediate exit when not using stdio
In the lib/http refactor

    52443c2444 restic: refactor to use lib/http

We forgot to serve the data and wait for the server to finish. This is
not tested in the unit tests as it is part of the command line
handler.

Fixes #6644 Fixes #6647
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
6257a6035c serve webdav: fix --baseurl handling after lib/http refactor
The webdav library was confused by the Path manipulation done by
lib/http when stripping the prefix.

This patch adds the prefix back before calling it.

Fixes #6650
2022-12-23 12:28:07 +00:00
Nick Craig-Wood
54c0f17f2a azureblob: fix "409 Public access is not permitted on this storage account"
This error was caused by rclone supplying an empty
`x-ms-blob-public-access:` header when creating a container for
private access, rather than omitting it completely.

This is a valid way of specifying containers should be private, but if
the storage account has the flag "Blob public access" unset then it
gives "409 Public access is not permitted on this storage account".

This patch fixes the problem by only supplying the header if the
access is set.

Fixes #6645
2022-12-23 12:28:07 +00:00
Kaloyan Raev
d049cbb59e s3/storj: update endpoints
Storj switched to a single global s3 endpoint backed by a BGP routing.
We want to stop advertising the former regional endpoints and have the
global one as the only option.
2022-12-22 15:46:49 +00:00
Anagh Kumar Baranwal
00e853144e rc: set url to the first value of rc-addr since it has been converted to an array of strings now -- fixes #6641
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2022-12-22 09:02:20 +00:00
albertony
5ac8cfee56 docs: show only significant parts of version number in version introduced label 2022-12-21 12:41:47 +00:00
Nick Craig-Wood
496ae8adf6 Start v1.62.0-DEV development 2022-12-20 18:33:59 +00:00
Nick Craig-Wood
2001cc0831 Version v1.61.0 2022-12-20 17:16:14 +00:00
Ole Frost
a35490bf70 docs: Added note on Box API rate limits 2022-12-20 12:49:31 +00:00
Nick Craig-Wood
01877e5a0f s3: ignore versionIDs from uploads unless using --s3-versions or --s3-versions-at
Before this change, when a new object was created s3 returned its
versionID (on a versioned bucket) and rclone recorded it in the
object.

This means that when rclone came to delete the object it would delete
it with the versionID.

However it is common to forbid actions with versionIDs on buckets so
as to preserve the historical record and these operations would fail
whereas they succeeded in pre-v1.60.0 versions.

This patch fixes the problem by not recording versions of objects
supplied by the S3 API on upload unless `--s3-versions` or
`--s3-version-at` is used. This makes rclone behave as it did before
v1.60.0 when version support was introduced.

See: https://forum.rclone.org/t/s3-and-intermittent-403-errors-with-file-renames-and-drag-and-drop-operations-in-windows-explorer/34773
2022-12-17 10:24:56 +00:00
Nick Craig-Wood
614d79121a serve dlna: fix panic: Logger uninitialized.
Before this change we forgot to initialize the logger for the dlna
server. This meant when it needed to log something, it panicked
instead.

See: https://forum.rclone.org/t/rclone-serve-dlna-after-few-hours-of-idle-running-panic-logger-uninitialized-names/34835
2022-12-17 10:23:58 +00:00
Nick Craig-Wood
3a6f1f5cd7 filter: add metadata filters --metadata-include/exclude/filter and friends
Fixes #6353
2022-12-17 10:21:11 +00:00
Nick Craig-Wood
4a31961c4f filter: factor rules into its own file 2022-12-16 17:05:31 +00:00
Abdullah Saglam
7be9855a70 azureblob: implement --use-server-modtime
This patch implements --use-server-modtime for the Azureblob backend.

It does this by not reading the time from the metadata if the global
flag is set.
2022-12-15 15:58:36 +00:00
Nick Craig-Wood
6f8112ff67 Add Abdullah Saglam to contributors 2022-12-15 15:58:36 +00:00
Nick Craig-Wood
67fc227684 config: add config/setpath for setting config path via rc/librclone 2022-12-15 12:41:30 +00:00
Nick Craig-Wood
7edb4c0162 sftp: fix NewObject with leading /
This was breaking the use of operations/stat with remote with an
initial /

See: https://forum.rclone.org/t/rclone-rc-api-operations-stat-is-not-working-for-sftp-remotes/34560
2022-12-15 12:40:59 +00:00
Nick Craig-Wood
5db4493557 lib/http: fix race condition 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
a85c0b0cc2 cmd/serve/httplib: remove as it is now replaced by lib/http 2022-12-15 12:38:09 +00:00
Nolan Woods
52443c2444 restic: refactor to use lib/http
Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2022-12-15 12:38:09 +00:00
Nick Craig-Wood
4444d2d102 serve webdav: refactor to use lib/http 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
08a1ca434b rcd: refactor rclone rc server to use lib/http 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
a9ce86f9a3 lib/http: add UsingAuth method 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
3167292c2f lib/http: remove unused Template from Config 2022-12-15 12:38:09 +00:00
Tom Mombourquette
ec7cc2b3c3 lib/http: Simplify server.go to export an http server rather than an interface
This also makes the implementation public.
2022-12-15 12:38:09 +00:00
Tom Mombourquette
2a2fcf1012 lib/http: rationalise names in test servers to be more consistent 2022-12-15 12:38:09 +00:00
Tom Mombourquette
6d62267227 serve http: support unix sockets and multiple listeners
- add support for unix sockets (which skip the auth).
- add support for multiple listeners
- collapse unnecessary internal structure of lib/http so it can all be
  imported together
- moves files in sub directories of lib/http into the main lib/http
  directory and reworks the code that uses them.

See: https://forum.rclone.org/t/wip-rc-rcd-over-unix-socket/33619
Fixes: #6605
2022-12-15 12:38:09 +00:00
Nick Craig-Wood
dfd8ad2fff Add compiletest target to compile all the tests only 2022-12-15 12:38:09 +00:00
Nick Craig-Wood
43506f8086 test memory: read metadata if -M flag is specified 2022-12-15 12:37:19 +00:00
Nick Craig-Wood
ec3cee89d3 fstest: switch to port forwarding now Owncloud disallows wildcards
A recent security fix in the Owncloud container now causes it to
disallow wildcards in the OWNCLOUD_TRUSTED_DOMAINS setting.

This patch works around the problem by using port forwarding from the
host so we can keep the domain name constant.
2022-12-15 11:34:12 +00:00
Nick Craig-Wood
a171497a8b Add Jack to contributors 2022-12-15 11:34:12 +00:00
Jack
c6ad15e3b8 s3: make DigitalOcean name canonical 2022-12-14 16:35:05 +00:00
Jack
9a81885b51 s3: add DigitalOcean Spaces regions sfo3, fra1, syd1 2022-12-14 16:35:05 +00:00
Nick Craig-Wood
3d291da0f6 azureblob: fix directory marker detection after SDK upgrade
When the SDK was upgraded it started delivering metadata where the
keys were not in lower case as per the old SDK.

Rclone normalises the case of the keys for storage in the Object, but
the directory marker check was being done with the unnormalised keys
as it needs to be done before the Object is created.

This fixes the directory marker check to do a case insensitive compare
of the metadata keys.
2022-12-14 14:24:26 +00:00
Nick Craig-Wood
43bf177ff7 s3: fix excess memory usage when using versions
Before this change, we were taking the version ID straight from the
XML blob returned by the SDK and thus pinning the XML into memory
which bulked up the average memory per object from about 400 bytes to
4k.

Copying the string fixes the excess memory usage.
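
The underlying Go behaviour is that a substring shares the backing array
of the string it was sliced from, so keeping just the version ID kept
the whole XML blob alive. A minimal illustration of detaching it
(assuming go1.18+ for strings.Clone):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        hugeXML := strings.Repeat("x", 1<<20) + "versionID"
        pinned := hugeXML[len(hugeXML)-9:] // still references the 1 MiB backing array
        detached := strings.Clone(pinned)  // copies just the 9 bytes
        fmt.Println(len(pinned), len(detached))
    }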
2022-12-14 14:24:26 +00:00
Nick Craig-Wood
c446651be8 Revert "s3: turn off list v2 support for Alibaba OSS since it does not work"
This reverts commit 4f386a1ccd.

It turns out that Alibaba OSS does support list v2 and the detection
code was wrong.

This means that users of the gov version of Alibaba will have to add
`list_version 1` to their config files.

See #6600
2022-12-14 14:24:26 +00:00
Nick Craig-Wood
6c407dbe15 s3: fix detection of listing routines which don't support v2 properly
In this commit

ab849b3613 s3: fix listing loop when using v2 listing on v1 server

The ContinuationToken was tested for existence, but it is the
NextContinuationToken that we are interested in.

See: #6600
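
A hedged sketch of the check using aws-sdk-go v1 field names; the
function and error text are illustrative:

    package s3sketch

    import (
        "errors"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    // checkV2Page returns an error if the server claims the listing is
    // truncated but supplies no NextContinuationToken, which is what a
    // v1-only server does when sent a v2 listing request.
    func checkV2Page(resp *s3.ListObjectsV2Output) error {
        if aws.BoolValue(resp.IsTruncated) && resp.NextContinuationToken == nil {
            return errors.New("server does not seem to support V2 listing: try list_version 1 in the config")
        }
        return nil
    }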
2022-12-14 14:24:26 +00:00
albertony
5a59b49b6b drive: handle shared drives with leading/trailing space in name (related to #6618) 2022-12-14 10:18:12 +01:00
albertony
8b9f3bbe29 fspath: improved detection of illegal remote names starting with dash (related to #4261) 2022-12-14 10:18:12 +01:00
albertony
8e6a469f98 fspath: allow unicode numbers and letters in remote names
Previously it was limited to plain ASCII (0-9, A-Z, a-z).

Implemented by adding \p{L}\p{N} alongside the \w in the regex; even
though these overlap, it means we can be sure it is 100%
backwards compatible.

Fixes #6618
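
Roughly, in Go regexp syntax; the pattern below is only illustrative,
not the exact one used by fspath:

    package main

    import (
        "fmt"
        "regexp"
    )

    // Word characters, Unicode letters and digits, with spaces and dashes
    // allowed in the middle but not at the start or end.
    var remoteName = regexp.MustCompile(`^[\w\p{L}\p{N}]([\w\p{L}\p{N} -]*[\w\p{L}\p{N}])?$`)

    func main() {
        fmt.Println(remoteName.MatchString("第1夜"))    // true
        fmt.Println(remoteName.MatchString(" badname")) // false
    }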
2022-12-12 13:24:32 +00:00
albertony
f650a543ef docs: remote names may not start or end with space 2022-12-12 13:24:32 +00:00
albertony
683178a1f4 fspath: change remote name regex to not match when leading/trailing space 2022-12-12 13:24:32 +00:00
albertony
3937233e1e fspath: refactor away unnecessary constant for remote name regex 2022-12-12 13:24:32 +00:00
albertony
c571200812 fspath: remove unused capture group in remote name regex 2022-12-12 13:24:32 +00:00
albertony
04a663829b fspath: remove duplicate start-of-line anchor in remote name regex 2022-12-12 13:24:32 +00:00
albertony
6b4a2c1c4e fspath: remove superfluous underscore covered by existing word character class in remote name regex 2022-12-12 13:24:32 +00:00
albertony
f73be767a4 fspath: add unit tests for remote names with leading dash 2022-12-12 13:24:32 +00:00
albertony
4120dffcc1 fspath: add unit tests for remote names with space 2022-12-12 13:24:32 +00:00
Nick Craig-Wood
53ff5bb205 build: Update golang.org/x/net/http2 to fix GO-2022-1144
An attacker can cause excessive memory growth in a Go server accepting
HTTP/2 requests. HTTP/2 server connections contain a cache of HTTP
header keys sent by the client. While the total number of entries in
this cache is capped, an attacker sending very large keys can cause
the server to allocate approximately 64 MiB per open connection.
2022-12-12 12:49:12 +00:00
Nick Craig-Wood
397f428c48 Add vanplus to contributors 2022-12-12 12:49:12 +00:00
vanplus
c5a2c9b046 onedrive: document workaround for shared with me files 2022-12-12 12:04:28 +00:00
Kaloyan Raev
b98d7f6634 storj: implement server side Copy 2022-12-12 12:02:38 +00:00
Ole Frost
beea4d5119 lib/oauthutil: Improved usability of config flows needing web browser
The config question "Use auto config?" confused many users and led to
recurring forum posts from users that were unaware that they were using
a remote or headless machine.

This commit makes the question and possible options more descriptive
and precise.

This commit also adds references to the guide on remote setup in the
documentation of backends using oauth as primary authentication.
2022-12-09 14:41:05 +00:00
Eng Zer Jun
8e507075d1 test: replace defer cleanup with t.Cleanup
Reference: https://pkg.go.dev/testing#T.Cleanup
Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2022-12-09 14:38:05 +00:00
Nick Craig-Wood
be783a1856 dlna: properly attribute code used from https://github.com/anacrolix/dms
Fixes #4101
2022-12-09 14:27:10 +00:00
Nick Craig-Wood
450c366403 s3: fix nil pointer exception when using Versions
This was caused by

a9bd0c8de6 s3: reduce memory consumption for s3 objects

Which assumed that the StorageClass would always be set, but it isn't
set for Versions.
2022-12-09 12:23:51 +00:00
Matthew Vernon
1dbdc48a77 WASM: comply with wasm_exec.js licence terms
The BSD-style license that Go uses requires the license to be included
with the source distribution; so add it as LICENSE.wasmexec (to avoid
confusion with the other licenses in rclone) and note the location of
the license in wasm_exec.js itself.
2022-12-07 15:25:46 +00:00
Nick Craig-Wood
d7cb17848d azureblob: revamp authentication to include all methods and docs
The updates the authentication to include

- Auth from the environment
    1. Environment Variables
    2. Managed Service Identity Credentials
    3. Azure CLI credentials (as used by the az tool)
- Account and Shared Key
- SAS URL
- Service principal with client secret
- Service principal with certificate
- User with username and password
- Managed Service Identity Credentials

And rationalises the auth order.
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
f3c8b7a948 azureblob: add --azureblob-no-check-container to assume container exists
Normally rclone will check the container exists before uploading if it
hasn't listed the container yet.

Often rclone will be running with a limited set of permissions which
means rclone can't create the container anyway, so this stops the
check.

This will save a transaction.
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
914fbe242c azureblob: ignore AuthorizationFailure when trying to create a container
If we get AuthorizationFailure when trying to create a container, then
assume the container has already been created
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
f746b2fe85 azureblob: port old authentication methods to new SDK
Co-authored-by: Brad Ackerman <brad@facefault.org>
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
a131da2c35 azureblob: Port to new SDK
This commit switches from using the old Azure go modules

    github.com/Azure/azure-pipeline-go/pipeline
    github.com/Azure/azure-storage-blob-go/azblob
    github.com/Azure/go-autorest/autorest/adal

To the new SDK

    github.com/Azure/azure-sdk-for-go/

This stops rclone using deprecated code and enables the full range of
authentication with Azure.

See #6132 and #5284
2022-12-06 15:07:01 +00:00
Nick Craig-Wood
60e4cb6f6f Add MohammadReza to contributors 2022-12-06 15:06:51 +00:00
MohammadReza
0a8b1fe5de s3: add Liara LOS to provider list 2022-12-06 12:25:23 +00:00
asdffdsazqqq
b24c83db21 restic: fix typo in docs 'remove' should be 'remote' 2022-12-06 12:14:25 +00:00
Nick Craig-Wood
4f386a1ccd s3: turn off list v2 support for Alibaba OSS since it does not work
See: #6600
2022-12-06 12:11:21 +00:00
Nick Craig-Wood
ab849b3613 s3: fix listing loop when using v2 listing on v1 server
Before this change, rclone would enter a listing loop if it used v2
listing on a v1 server and the list exceeded 1000 items.

This change detects the problem and gives the user a helpful message.

Fixes #6600
2022-12-06 12:11:21 +00:00
Nick Craig-Wood
10aee3926a Add Kevin Verstaen to contributors 2022-12-06 12:11:21 +00:00
Nick Craig-Wood
4583b61e3d Add Erik Agterdenbos to contributors 2022-12-06 12:11:06 +00:00
Nick Craig-Wood
483e9e1ee3 Add ycdtosa to contributors 2022-12-06 12:11:06 +00:00
Kevin Verstaen
c2dfc3e5b3 fs: Add global flag '--color' to control terminal colors
* fs: add TerminalColorMode type
* fs: add new config(flags) for TerminalColorMode
* lib/terminal: use TerminalColorMode to determine how to handle colors
* Add documentation for '--terminal-color-mode'
* tree: remove obsolete --color replaced by global --color

This changes the default behaviour of tree. It now displays colors by
default instead of only displaying them when the flag -C/--color was
active. Old behaviour (no color) can be achieved by setting --color to
'never'.

Fixes: #6604
2022-12-06 12:07:06 +00:00
Erik Agterdenbos
a9bd0c8de6 s3: reduce memory consumption for s3 objects
Copying the storageClass string instead of using a pointer to the original string.
This prevents the Go garbage collector from keeping large amounts of
XMLNode structs and references in memory, created by xmlutil.XMLToStruct()
from the aws-sdk-go.
2022-12-05 23:07:08 +00:00
Anthony Pessy
1628ca0d46 ftp: Improve performance to speed up --files-from and NewObject
This commit uses the MLST command (where available) to get the status
for single files rather than listing the parent directory and looking
for the file. This makes actions such as using `--files-from` much quicker.

* use getEntry to lookup remote files when supported
*  findItem now expects the full path directly

It makes the expected argument similar to the getInfo method, the
difference now is that one is returning a FileInfo whereas
the other is returning an ftp Entry.

Fixes #6225

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2022-12-05 16:19:04 +00:00
albertony
313493d51b docs: remove minimum versions from command pages of pre v1 commands 2022-12-03 18:58:55 +01:00
albertony
6d18f60725 docs: add minimum versions to the command pages 2022-12-03 18:58:55 +01:00
albertony
d74662a751 docs: add badge showing version introduced and experimental/beta/deprecated status to command doc pages 2022-12-03 18:58:55 +01:00
albertony
d05fd2a14f docs: add badge for experimental/beta/deprecated status next to version in backend docs 2022-12-03 18:58:55 +01:00
albertony
097be753ab docs: minor cleanup of headers in backend docs 2022-12-03 18:58:55 +01:00
ycdtosa
50c9678cea ftp: update help text of implicit/explicit TLS options to refer to FTPS instead of FTP 2022-11-29 14:58:46 +01:00
eNV25
7672cde4f3 cmd/ncdu: use negative values for key runes
The previous version used values after the maximum Unicode code-point
to encode a key. This could lead to an overflow since a key is an int16,
a rune is int32 and the maximum Unicode code-point is larger than int16.

A better solution is to simply use negative runes for keys.
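
A small sketch of the approach (names are illustrative): real runes are
always >= 0, so negative values can never collide with a key typed by
the user.

    package main

    import "fmt"

    const (
        keyArrowUp   rune = -1
        keyArrowDown rune = -2
    )

    func describe(k rune) string {
        if k < 0 {
            return fmt.Sprintf("special key %d", k)
        }
        return fmt.Sprintf("rune %q", k)
    }

    func main() {
        fmt.Println(describe('q'))
        fmt.Println(describe(keyArrowUp))
    }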
2022-11-28 10:51:11 +00:00
eNV25
a4c65532ea cmd/ncdu: use tcell directly instead of the termbox wrapper
Following up on 36add0af, which switched from termbox
to tcell's termbox wrapper.
2022-11-25 14:42:19 +00:00
Nick Craig-Wood
46b080c092 vfs: Fix IO Error opening a file with O_CREATE|O_RDONLY in --vfs-cache-mode not full
Before this fix, opening a file with `O_CREATE|O_RDONLY` caused an IO error to
be returned when using `--vfs-cache-mode off` or `--vfs-cache-mode writes`.

This was because the file was opened with read intent, but the `O_CREATE`
implies write intent to create the file even though the file is opened
`O_RDONLY`.

This fix sets write intent for the file if `O_CREATE` is passed in which fixes
the problem for all the VFS cache modes.

It also extends the exhaustive open flags testing to `--vfs-cache-mode writes`
as well as `--vfs-cache-mode full` which would have caught this problem.

See: https://forum.rclone.org/t/i-o-error-trashing-file-on-sftp-mount/34317/
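
A hedged sketch of the intent check; the flag constants are from the os
package, the function name is illustrative:

    package vfssketch

    import "os"

    // needsWrite reports whether an open call needs write intent on the
    // backing file: either the caller asked for writing, or O_CREATE may
    // have to create the file even though it was opened O_RDONLY.
    func needsWrite(flags int) bool {
        if flags&(os.O_WRONLY|os.O_RDWR) != 0 {
            return true
        }
        return flags&os.O_CREATE != 0
    }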
2022-11-24 17:04:36 +00:00
Nick Craig-Wood
0edf6478e3 Add Nathaniel Wesley Filardo to contributors 2022-11-24 17:04:36 +00:00
Nathaniel Wesley Filardo
f7cdf318db azureblob: support simple "environment credentials"
As per
https://learn.microsoft.com/en-us/dotnet/api/azure.identity.environmentcredential?view=azure-dotnet

This supports only AZURE_CLIENT_SECRET-based authentication, as with the
existing service principal support.

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2022-11-24 12:06:14 +00:00
Nathaniel Wesley Filardo
6f3682c12f azureblob: make newServicePrincipalTokenRefresher take parsed principal structure 2022-11-24 12:06:14 +00:00
Nick Craig-Wood
e3d593d40c build: update dependencies 2022-11-24 11:05:54 +00:00
Nick Craig-Wood
83551bb02e cmount: update cgofuse for FUSE-T support for mounting volumes on Mac
See: https://forum.rclone.org/t/fr-fuse-t-support-for-mounting-volumes-on-mac/33110/
2022-11-24 10:51:16 +00:00
Nick Craig-Wood
430bf0d5eb crypt: fix compress wrapping crypt giving upload errors
Before this fix a chain compress -> crypt -> s3 was giving errors

    BadDigest: The Content-MD5 you specified did not match what we received.

This was because the crypt backend was encrypting the underlying local
object to calculate the hash rather than the contents of the metadata
stream.

It did this because the crypt backend incorrectly identified the
object as a local object.

This fixes the problem by making sure the crypt backend does not
unwrap anything but fs.OverrideRemote objects.

See: https://forum.rclone.org/t/not-encrypting-or-compressing-before-upload/32261/10
2022-11-21 08:02:09 +00:00
Nick Craig-Wood
dd71f5d968 fs: move operations.NewOverrideRemote to fs.NewOverrideRemote 2022-11-21 08:02:09 +00:00
albertony
7db1c506f2 smb: fix issue where spurious dot directory is created 2022-11-20 17:12:02 +00:00
Nick Craig-Wood
959cd938bc docs: Add minimum versions to all the backend pages and some of the other pages 2022-11-18 14:41:24 +00:00
Nick Craig-Wood
03b07c280c Changelog updates from Version v1.60.1 2022-11-17 16:32:25 +00:00
Nick Craig-Wood
705e8f2fe0 smb: fix Failed to sync: context canceled at the end of syncs
Before this change we were putting connections that had a local
context in them into the connection pool.

This meant that when the operation had finished the context was
cancelled and the connection became unusable.

See: https://forum.rclone.org/t/failed-to-sync-context-canceled/34017/
2022-11-16 10:55:25 +00:00
Nick Craig-Wood
591fc3609a vfs: fix deadlock caused by cache cleaner and upload finishing
Before this patch a deadlock could occur if the cache cleaner was
running when an object upload finished.

This fixes the problem by delaying marking the object as clean until
we have notified the VFS layer. This means that the cache cleaner
won't consider the object until **after** the VFS layer has been
notified, thus avoiding the deadlock.

See: https://forum.rclone.org/t/rclone-mount-deadlock-when-dir-cache-time-strikes/33486/
2022-11-15 18:01:36 +00:00
Nick Craig-Wood
b4a3d1b9ed Add asdffdsazqqq to contributors 2022-11-15 18:01:36 +00:00
asdffdsazqqq
84219b95ab docs: faq: how to use a proxy server that requires a username and password - fixes #6565 2022-11-15 17:58:43 +00:00
Nick Craig-Wood
2c78f56d48 webdav: fix Move/Copy/DirMove when using -server-side-across-configs
Before this change, when using -server-side-across-configs rclone
would direct Move/Copy/DirMove to the destination server.

However this should be directed to the source server. This is a little
unclear in the RFC, but the name of the parameter "Destination:" seems
clear and this is how dCache and Rucio have implemented it.

See: https://forum.rclone.org/t/webdav-copy-request-implemented-incorrectly/34072/
2022-11-15 09:51:30 +00:00
Nick Craig-Wood
a61d219bcd local: fix -L/--copy-links with filters missing directories
In this commit

8d1fff9a82 local: obey file filters in listing to fix errors on excluded files

We introduced the concept of local backend filters.

Unfortunately the filters were being applied before we had resolved
the symlink to point to a directory. This meant that symlinks pointing
to directories were filtered out when they shouldn't have been.

This was fixed by moving the filter check until after the symlink had
been resolved.

See: https://forum.rclone.org/t/copy-links-not-following-symlinks-on-1-60-0/34073/7
2022-11-14 18:03:40 +00:00
Nick Craig-Wood
652d3cdee4 vfs: windows: fix slow opening of exe files by not truncating files when not necessary
Before this change we truncated files in the backing store regardless
of whether we needed to or not.

After, we check to see if the file is the right size and don't
truncate if it is.

Apparently Windows Defender likes to check executables each time they
are modified, and truncating a file to its existing size is enough to
trigger the Windows Defender scan. This was causing a big slowdown for
operations which opened and closed the file a lot, such as looking at
properties on an executable.

See: https://forum.rclone.org/t/for-mount-sftp-why-right-click-on-exe-file-is-so-slow-until-it-freezes/33830
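
The shape of the fix as a rough sketch with plain os calls (not the VFS
cache code itself):

    package vfssketch

    import "os"

    // truncateIfNeeded only truncates the backing file when its size
    // actually differs, avoiding a needless modification that would
    // trigger a Windows Defender rescan of an executable.
    func truncateIfNeeded(path string, size int64) error {
        fi, err := os.Stat(path)
        if err == nil && fi.Size() == size {
            return nil // already the right size, nothing to do
        }
        return os.Truncate(path, size)
    }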
2022-11-14 17:05:51 +00:00
Nick Craig-Wood
bb1fc5b86d Add Kamui to contributors 2022-11-14 17:05:51 +00:00
Kamui
efd3c6449b rcserver: avoid generating default credentials with htpasswd - fixes #4839 2022-11-14 15:26:44 +00:00
Nick Craig-Wood
0ac5795f8c fs: make all duration flags take y, M, w, d etc suffixes
Fixes #6556
2022-11-14 15:13:49 +00:00
Nick Craig-Wood
2f77651f64 Add rkettelerij to contributors 2022-11-14 15:13:49 +00:00
Nick Craig-Wood
8daacc2b99 Add techknowlogick to contributors 2022-11-14 15:13:49 +00:00
rkettelerij
87fa9f8e46 azureblob: Add support for custom upload headers 2022-11-14 15:12:28 +00:00
albertony
1392793334 sftp: auto-detect shell type for fish
Fish is different from POSIX-based Unix shells such as bash,
and a bracketed variable references like we use for the
auto-detection echo command is not supported. The command
will return with zero exit code but produce no output on
stdout. There is a message on stderr, but we don't log it
due to the zero exit code:

fish: Variables cannot be bracketed. In fish, please use {$ShellId}.

Fixes #6552
2022-11-11 15:32:44 +00:00
techknowlogick
0e427216db s3: Add additional Wasabi locations 2022-11-11 14:39:12 +00:00
Anagh Kumar Baranwal
0c56c46523 rc: Add commands to set GC Percent & Memory Limit (1.19+)
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2022-11-10 12:07:18 +00:00
Nick Craig-Wood
617c5d5e1b rcat: preserve metadata when Copy falls back to Rcat
Before this change if we copied files of unknown size, then they lost
their metadata.

This was particularly noticeable using --s3-decompress.

This change adds metadata to Rcat and RcatSized and changes Copy to
pass the metadata in when it calls Rcat for an unknown sized input.

Fixes #6546
2022-11-10 12:04:35 +00:00
Nick Craig-Wood
ec2024b907 fstest: use WithMetadata / WithMimeType 2022-11-10 12:04:35 +00:00
Nick Craig-Wood
458845ce89 fs/object: add WithMetadata and WithMimetype to static and memory objects 2022-11-10 12:04:35 +00:00
Nick Craig-Wood
57bde20acd Add Aaron Gokaslan to contributors 2022-11-10 12:04:35 +00:00
Aaron Gokaslan
b0248e8070 s3: fix for unchecked err value in s3 listv2 2022-11-10 11:52:59 +00:00
Nick Craig-Wood
b285efb476 mailru: allow timestamps to be before the epoch 1970-01-01
Fixes #6547
2022-11-10 11:27:01 +00:00
Nick Craig-Wood
be6f29930b dedupe: make dedupe obey the filters
See: https://forum.rclone.org/t/dial-tcp-lookup-api-pcloud-com-no-such-host/33910/
2022-11-10 09:56:02 +00:00
Nick Craig-Wood
653bc23728 dedupe: count Checks in the stats while scanning for duplicates
This allows the user to see rclone has not hung.

See: https://forum.rclone.org/t/dial-tcp-lookup-api-pcloud-com-no-such-host/33910/
2022-11-10 09:56:02 +00:00
Nick Craig-Wood
47b04580db accounting: make it so we can account directories as well as files 2022-11-10 09:56:02 +00:00
Nick Craig-Wood
919e28b8bf lib/cache: fix alias backend shutting down too soon
Before this patch, when an alias backend was created it would be
renamed to be canonical and in the process Shutdown would be called on
it. This was particularly noticeable with the dropbox backend which
gave this error when uploading files after the backend was Shutdown.

    Failed to copy: upload failed: batcher is shutting down

This patch fixes the cache Rename code not to finalize objects if the
object that is being overwritten is the same as the existing object.

See: https://forum.rclone.org/t/upload-failed-batcher-is-shutting-down/33900
2022-11-09 16:29:23 +00:00
Nick Craig-Wood
3a3bc5a1ae mailru: note that an app password is now needed - fixes #6398 2022-11-08 20:33:11 +00:00
Nick Craig-Wood
133c006c37 Add Roel Arents to contributors 2022-11-08 20:33:11 +00:00
Roel Arents
e455940f71 azureblob: allow emulator account/key override 2022-11-08 20:24:06 +00:00
Nick Craig-Wood
65528fd009 docs: remove link to rclone slack as it is no longer supported 2022-11-08 16:11:34 +00:00
Nick Craig-Wood
691159fe94 s3: allow Storj to server side copy since it seems to work now - fixes #6550 2022-11-08 16:05:24 +00:00
Nick Craig-Wood
09858c0c5a Add Arnie97 to contributors 2022-11-08 16:05:24 +00:00
Nick Craig-Wood
5fd0abb2b9 Add x3-apptech to contributors 2022-11-08 16:05:24 +00:00
Arnie97
36c37ffec1 backend/http: rename stat to decodeMetadata 2022-11-08 13:04:17 +00:00
Arnie97
6a5b7664f7 backend/http: support content-range response header 2022-11-08 13:04:17 +00:00
Arnie97
ebac854512 backend/http: do not update object size based on range requests 2022-11-08 13:04:17 +00:00
Arnie97
cafce96185 backend/http: parse get responses when no_head is set 2022-11-08 13:04:17 +00:00
João Henrique Franco
92ffcf9f86 wasm: fix walltime link error by adding up-to-date wasm_exec.js
Solves link error while running rclone's wasm version. Go's `walltime1` function was renamed to `walltime`. This commit updates wasm_exec.js with the new name.
2022-11-07 12:13:23 +00:00
albertony
64cdbb67b5 ncdu: add support for modification time 2022-11-07 11:57:44 +00:00
albertony
528fc899fb ncdu: fallback to sort by name also for sort by average size 2022-11-07 11:57:44 +00:00
x3-apptech
d452f502c3 cmd: Enable SIGINFO (Ctrl-T) handler on FreeBSD, NetBSD, OpenBSD and Dragonfly BSD 2022-11-07 11:45:04 +00:00
albertony
5d6b8141ec Replace deprecated ioutil
As of Go 1.16, the same functionality is now provided by package io or
package os, and those implementations should be preferred in new code.
2022-11-07 11:41:47 +00:00
albertony
776e5ea83a docs: fix character that was incorrectly interpreted as markdown 2022-11-07 08:59:40 +01:00
albertony
c9acc06a49 Add Clément Notin to contributors 2022-11-07 08:51:49 +01:00
Clément Notin
a2dca02594 docs: fix character that was incorrectly interpreted as markdown 2022-11-07 08:50:21 +01:00
Joda Stößer
210331bf61 docs: fix typo remove in rclone_serve_restic command 2022-11-07 08:46:05 +01:00
Nick Craig-Wood
5b5fdc6bc5 s3: add provider quirk --s3-might-gzip to fix corrupted on transfer: sizes differ
Before this change, some files were giving this error when downloaded
from Cloudflare and other providers.

    ERROR corrupted on transfer: sizes differ NNN vs MMM

This is because these providers auto-gzip the object when rclone
wasn't expecting it. (AWS does not gzip objects unless they were
uploaded gzipped.)

This patch adds a quirk to fix the problem and a flag to control
it. The quirk `might_gzip` is set to `true` for all providers except
AWS.

See: https://forum.rclone.org/t/s3-error-corrupted-on-transfer-sizes-differ-nnn-vs-mmm/33694/
Fixes: #6533
2022-11-04 16:53:32 +00:00
Nick Craig-Wood
0de74864b6 Add dgouju to contributors 2022-11-04 16:53:32 +00:00
dgouju
7042a11875 sftp: add configuration options to set ssh Ciphers / MACs / KeyExchange 2022-11-03 17:11:28 +00:00
Nick Craig-Wood
028832ce73 s3: if bucket or object ACL is empty string then don't add X-Amz-Acl: header - fixes #5730
Before this fix it was impossible to stop rclone generating an
X-Amz-Acl: header which is incompatible with GCS with uniform access
control and is generally deprecated at AWS.
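
A hedged sketch of the logic with aws-sdk-go v1: leaving the ACL field
nil means the SDK sends no X-Amz-Acl header at all.

    package s3sketch

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func putObjectInput(bucket, key, acl string) *s3.PutObjectInput {
        req := &s3.PutObjectInput{
            Bucket: aws.String(bucket),
            Key:    aws.String(key),
        }
        if acl != "" {
            req.ACL = aws.String(acl) // only set the header when an ACL was configured
        }
        return req
    }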
2022-11-03 17:06:24 +00:00
Philip Harvey
c7c9356af5 s3: stop setting object and bucket ACL to "private" if it is an empty string #5730 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
3292c112c5 Add Philip Harvey to contributors 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
126d71b332 Add Anthony Pessy to contributors 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
df9be72a82 Add coultonluke to contributors 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
6aa8f7409a Add Samuel Johnson to contributors 2022-11-03 17:06:24 +00:00
Anthony Pessy
10c884552c s3: use different strategy to resolve s3 region
The API endpoint GetBucketLocation requires
top level permission.

If we do an authenticated head request to a bucket, the bucket location will be returned in the HTTP headers.

Fixes #5066
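
A simplified, unauthenticated sketch of the idea with plain net/http
(the bucket name and endpoint are illustrative); AWS reports the bucket
region in the x-amz-bucket-region response header of a HEAD request:

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        resp, err := http.Head("https://my-bucket.s3.amazonaws.com/")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Header.Get("x-amz-bucket-region"))
    }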
2022-11-02 11:48:08 +00:00
albertony
2617610741 docs: add direct download link for windows arm64 2022-10-31 21:14:10 +01:00
coultonluke
53dd174f3d docs: corrected download links in windows install docs 2022-10-31 21:09:53 +01:00
albertony
65987f5970 lib/file: improve error message for create dir on non-existent network host on windows (#6420) 2022-10-28 21:00:22 +02:00
Manoj Ghosh
1fc864fb32 oracle-object-storage: doc fix
See #6521
2022-10-28 20:32:17 +02:00
albertony
22abcc9fd2 build: update golang.org/x/net dependency
This fixes vulnerability GO-2022-0969 reported by govulncheck:

HTTP/2 server connections can hang forever waiting for a clean
shutdown that was preempted by a fatal error. This condition can
be exploited by a malicious client to cause a denial of service.

Call stacks in your code:
Error: cmd/serve/restic/restic.go:150:22: github.com/rclone/rclone/cmd/serve/restic.init$1$1 calls golang.org/x/net/http2.Server.ServeConn

Found in: golang.org/x/net/http2@v0.0.0-20220805013720-a33c5aa5df48
Fixed in: golang.org/x/net/http2@v0.0.0-20220906165146-f3363e06e74c
More info: https://pkg.go.dev/vuln/GO-2022-0969
2022-10-26 12:59:31 +02:00
albertony
178cf821de build: add vulnerability testing using govulncheck 2022-10-26 12:59:31 +02:00
albertony
f4a571786c local: clean absolute paths - fixes #6493 2022-10-25 21:09:56 +02:00
albertony
c0a8ffcbef build: setup-go v3 improved semver notation 2022-10-25 20:25:39 +02:00
albertony
76eeca9eae build: setup-go v3 dropped the stable input 2022-10-25 20:25:39 +02:00
Samuel Johnson
8114744bce docs: Update faq.md with bisync
Updated FAQ to clarify that experimental bi-sync is now available.
2022-10-23 11:15:09 +01:00
Nick Craig-Wood
db5d582404 Start v1.61.0-DEV development 2022-10-21 16:15:53 +01:00
Nick Craig-Wood
01dbbff62e Version v1.60.0 2022-10-21 15:06:08 +01:00
Nick Craig-Wood
afa61e702c docs: remove hosted by tag as server has moved 2022-10-21 12:49:10 +01:00
Nick Craig-Wood
546dc82793 Add Robert Newson to contributors 2022-10-21 12:49:10 +01:00
Nick Craig-Wood
d9c4d95ab3 Add Tom Mombourquette to contributors 2022-10-21 12:49:10 +01:00
Nick Craig-Wood
0fb1b75a02 Add Manoj Ghosh to contributors 2022-10-21 12:49:10 +01:00
Tom Mombourquette
38f1f5b177 rc: Fix mount/listmounts not returning the full Fs entered in mount/mount 2022-10-21 12:48:27 +01:00
Ole Frost
0d2a62a927 docs: Describe connection strings in alias backend 2022-10-21 12:47:51 +01:00
Manoj Ghosh
b75c207208 oracle-object-storage: overview, docs update 2022-10-21 12:47:03 +01:00
Ole Frost
dff223f195 install.sh: fix arm-v7 download 2022-10-21 12:35:58 +01:00
Robert Newson
d2fef05fe4 httplib: Add --xxx-min-tls-version option to select minimum tls values for HTTP servers
This allows administrators to disable TLS 1.0 and 1.1, for example.

Example:

rclone rcd --rc-min-tls-version=tls1.2 --rc-cert <cert> --rc-key <key>
2022-10-19 17:13:12 +01:00
Tom Mombourquette
188b9f8cf1 rc: corrected mount/unmountall help msg and title 2022-10-17 17:34:48 +01:00
Manoj Ghosh
daf3162bcf oracle-object-storage: minor docs update 2022-10-17 17:08:44 +01:00
Nick Craig-Wood
5e59e7f442 ftp: Fix hang when using ExplicitTLS to certain servers.
It was discovered that doing the tls Handshake immediately on
connection causes some FTP servers (proftpd and pureftpd) to hang.

This imports a fix for it by temporarily hard forking jlaffaye/ftp to
include the fix submitted as a pull request.

See: https://forum.rclone.org/t/rclone-ftps-explicit-rclone-touch-empty-files-proftpd-unable-to-build-data-connection-operation-not-permitted/22522
See: https://github.com/rclone/rclone/issues/6426#issuecomment-1243993039
See: https://github.com/jlaffaye/ftp/pull/283
See: https://github.com/jlaffaye/ftp/issues/282
2022-10-14 12:10:03 +01:00
Nick Craig-Wood
fce22c0065 s3: add --s3-no-system-metadata to suppress read and write of system metadata
See: https://forum.rclone.org/t/problems-with-content-disposition-and-backblaze-b2-using-s3/33292/
2022-10-14 11:12:04 +01:00
Nick Craig-Wood
bb3272e837 Add Bachue Zhou to contributors 2022-10-14 11:11:56 +01:00
Nick Craig-Wood
cb5b5635c7 Add Manoj Ghosh to contributors 2022-10-14 11:11:56 +01:00
Bachue Zhou
66ed0ca726 s3: add Qiniu KODO to s3 provider list - fixes #6195 2022-10-13 15:49:22 +01:00
Manoj Ghosh
b16e50851a Add a native backend for oracle object storage - fixes #6299 2022-10-13 13:04:56 +01:00
Nick Craig-Wood
90d23139f6 s3: drop binary metadata with an ERROR message
Before this change, rclone would attempt to upload metadata with
binary contents which fail to be uploaded by net/http.

This checks the keys and values for validity as http header values
before uploading.

See: https://forum.rclone.org/t/invalid-metadata-key-names-result-in-a-failure-to-transfer-xattr-results-in-failure-to-upload-net-http-invalid-header-field-value-for-x-amz-meta-samba-pai/33406/
2022-10-13 12:00:45 +01:00
Nick Craig-Wood
5ea9398b63 swift: add --swift-no-large-objects to reduce HEAD requests
Supplying the flag --swift-no-large-objects is a promise to the swift
backend that there are no dynamic or static large objects stored.

Using that knowledge rclone can speed up its operations, reducing the
number of HEAD requests.

See: https://forum.rclone.org/t/handling-or-not-of-large-objects-in-swift/33389/
See: https://forum.rclone.org/t/swift-sync-checksum-calls-head-on-every-object-so-is-very-slow/30322
2022-10-13 11:58:19 +01:00
Isaac Aymerich
3f804224f4 rc: validate Daemon option is not set when mounting a volume via RC - fixes #6469 2022-10-12 12:07:48 +01:00
Nick Craig-Wood
cf0bf159ab s3: try to keep the maximum precision in ModTime with --use-server-modtime
Before this change if --use-server-modtime was in use the ModTime
could change for an object as we receive it accurate to the nearest ms
in listings, but only accurate to the nearest second in HEAD and GET
requests.

Normally AWS returns the milliseconds as .000 in listings, but if
versions are in use it may not. Storj S3 also seems to return
milliseconds.

This patch tries to keep the maximum precision in the last modified
time, so it doesn't update a last modified time with a truncated
version if the times were the same to the nearest second.

See: https://forum.rclone.org/t/cache-fingerprint-miss-behavior-leading-to-false-positive-stalen-cache/33404/
2022-10-12 09:18:10 +01:00
Lesmiscore
6654b66114 union: propagate SlowHash feature 2022-10-10 07:58:01 +01:00
Nick Craig-Wood
9bf78d0373 local: fix "Failed to read metadata: function not implemented" on old Linux kernels
Before this change rclone used statx() to read the metadata for files
from the local filesystem when `-M` was in use.

Unfortunately statx() was only introduced in kernel 4.11 which was
released in April 2017, so there are current systems (e.g. CentOS 7)
still on kernel versions which don't support statx().

This patch checks to see if statx() is available and if it isn't, it
falls back to using fstatat(), which was introduced in Linux 2.6.16
and is guaranteed to be available for all Go versions.

See: https://forum.rclone.org/t/metadata-from-linux-local-s3-failed-to-copy-failed-to-read-metadata-from-source-object-function-not-implemented/33233/
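
A hedged sketch of the fallback using golang.org/x/sys/unix (Linux-only;
not the actual local backend code):

    package localsketch

    import "golang.org/x/sys/unix"

    // statNode tries statx() first and falls back to fstatat() when the
    // kernel reports ENOSYS (pre-4.11 kernels such as CentOS 7).
    func statNode(path string) error {
        var stx unix.Statx_t
        err := unix.Statx(unix.AT_FDCWD, path, 0, unix.STATX_BASIC_STATS, &stx)
        if err != unix.ENOSYS {
            return err // success, or a real error from statx()
        }
        var st unix.Stat_t
        return unix.Fstatat(unix.AT_FDCWD, path, &st, 0)
    }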
2022-10-07 14:14:16 +01:00
Nick Craig-Wood
0c1fb8b2b7 Add YanceyChiew to contributors 2022-10-07 14:14:08 +01:00
YanceyChiew
966654e23a dlna: run assets_generate to make new icons 2022-10-06 16:59:51 +01:00
YanceyChiew
13b65104eb dlna: add SSDP AnnounceInterval flag option
The current default AnnounceInterval is too short, causing the
multicast domain to be flooded with NOTIFY announcements,
which may prevent other dlna devices from sleeping.

This change allows users to set the announcement interval,
and it's default value also increased to 12 minutes.

Even within the interval, rclone can still passively respond to
M-SEARCH requests from other devices.
2022-10-06 16:59:51 +01:00
YanceyChiew
4a35aff33c dlna: add verification of addresses
Verify the http service listening address and the SSDP server
announcement address to prevent accidental listening of IPv6 addresses
that do not support dlna yet and may be globally accessible.

Unlistened addresses on the interface will also be filtered out of the
SSDP announcement to avoid misleading other services in the multicast domain.
2022-10-06 16:59:51 +01:00
YanceyChiew
09b6d939f5 dlna: add support for more external subtitle 2022-10-06 16:59:51 +01:00
Nick Craig-Wood
4e79de106a hubic: remove backend as service has now shutdown - fixes #6481 2022-10-05 13:33:37 +01:00
Nick Craig-Wood
b437d9461a Add Isaac Aymerich to contributors 2022-10-05 13:33:29 +01:00
Nick Craig-Wood
910af597a1 Add Lorenzo Milesi to contributors 2022-10-05 13:33:29 +01:00
Nick Craig-Wood
c10965ecfb Add Dimitri Papadopoulos Orfanos to contributors 2022-10-05 13:33:29 +01:00
albertony
5efb880772 Remove LICENSE 2022-10-04 15:40:37 +02:00
albertony
6c3b7d5820 Create LICENSE 2022-10-04 15:38:58 +02:00
Isaac Aymerich
c5109408c0 rc: handle external unmount when mounting
Before this change, if a mount was created via the rc but unmounted
externally with `fusermount -u` say, rclone would still believe the mount
was active when it wasn't.
2022-10-03 11:24:58 +01:00
Marco Molteni
a3c06b9bbe docs/content: remove duplicate Scaleway C14 Glacier
Scaleway S3/C14 is now called S3/Glacier. Since Glacier is already
mentioned in the Rclone Scaleway section, let's just remove this
entry from here.
2022-10-02 21:58:16 +01:00
Lesmiscore
2aa264b33c smb: backend to support SMB - fixes #2042 2022-09-30 16:10:57 +01:00
albertony
4e078765f9 docs: improve description of make command in install docs 2022-09-28 16:14:12 +02:00
albertony
7fbc928a19 docs: remove "After" in systemd mount example
See #6459
2022-09-26 19:14:10 +02:00
Lorenzo Milesi
27096323db docs: remove "After" in automount example
According to [systemd.automount](https://www.freedesktop.org/software/systemd/man/systemd.automount.html) manual

> Note that automount units are separate from the mount itself, so you should 
> not set After= or Requires= for mount dependencies here. 
> For example, you should not set After=network-online.target or 
> similar on network filesystems. Doing so may result in an ordering cycle.
2022-09-26 19:11:29 +02:00
Dimitri Papadopoulos Orfanos
7e547822d6 build: update GitHub actions to latest versions 2022-09-19 19:51:07 +01:00
Nick Craig-Wood
67625b1dbd ftp: increase timeouts on tests as they were failing locally 2022-09-19 19:45:52 +01:00
Nick Craig-Wood
88086643f7 ftp: adapt to library changes to fix connection errors #6426
In https://github.com/jlaffaye/ftp/commit/212daf295f the upstream FTP
library changed the way adding your own dialer works which meant that
connections when using explicit FTP were failing.

This patch reworks our connection code to bring it into the
expectations of the library.
2022-09-18 11:31:11 +01:00
Nick Craig-Wood
5f13d84135 compress: add extra debugging in case we have a repeat of #6434 2022-09-18 11:31:11 +01:00
Nick Craig-Wood
07efdb55fa compress: fix error handling to not use or return nil objects #6434 2022-09-18 11:31:11 +01:00
Nick Craig-Wood
fb6ddd680c compress: fix crash due to nil metadata #6434
Before this fix, if an error occurred reading the metadata, it could be
set as nil and then used, causing a crash.

This fix changes the readMetadata function so it returns an error, and
the error is always set if the metadata returned is nil.
2022-09-18 11:31:11 +01:00
Nick Craig-Wood
bc09105d2e Add Richard Bateman to contributors 2022-09-18 11:31:11 +01:00
Richard Bateman
4f374bc264 s3: add --s3-sse-customer-key-base64 to supply keys with binary data
Fixes #6400
2022-09-17 17:28:44 +01:00
Nick Craig-Wood
1c99661d8c onedrive: disable change notify in China region since it is not supported
Fixes #6444
2022-09-16 16:57:29 +01:00
Nick Craig-Wood
04b54bbb1e Add Alexander Knorr to contributors 2022-09-16 16:57:23 +01:00
Nick Craig-Wood
90cda2d6c2 Add Dmitry Deniskin to contributors 2022-09-16 16:57:23 +01:00
Nick Craig-Wood
dbd9ce78e6 Add Øyvind Heddeland Instefjord to contributors 2022-09-16 16:57:23 +01:00
Nick Craig-Wood
cbc18e2693 docs: update install docs to make more consistent
This also adds repology badges where appropriate to show versions in
external repositories.
2022-09-16 16:56:00 +01:00
Alexander Knorr
67c675d7ad docs: add chocolatey package manager to install instructions 2022-09-15 16:12:39 +01:00
Dmitry Deniskin
c080b39e47 s3: add support for IONOS Cloud Storage 2022-09-15 16:04:34 +01:00
Nick Craig-Wood
8504da496b Changelog updates from Version v1.59.2 2022-09-15 11:57:07 +01:00
Lesmiscore
67240bd541 sftp: fix directory creation races
If mkdir fails then before this change it would have thrown an
error.

After this change, if the error indicated that the directory
already exists then the error is not returned to the user.

This fixes a race condition when two rclone threads are trying to
create the same directory.
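
A hedged sketch of tolerating the race, written against the pkg/sftp
client API (error handling simplified):

    package sftpsketch

    import "github.com/pkg/sftp"

    // mkdirTolerant creates dir but treats "already exists" as success, so
    // two rclone threads creating the same directory cannot fail each other.
    func mkdirTolerant(c *sftp.Client, dir string) error {
        err := c.Mkdir(dir)
        if err == nil {
            return nil
        }
        if fi, statErr := c.Stat(dir); statErr == nil && fi.IsDir() {
            return nil // someone else created it first
        }
        return err
    }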
2022-09-14 16:45:35 +01:00
albertony
6ce0168ba5 docs: better alignment of icons
Fixes issue with spacing between icon and text in backend docs headers.

This reverts the changes from PR #5889 and #5701, which aligned menu/dropdown items when
icons have different sizes, and implements an alternative fix which gives slightly better
results, and also is more of a native Font Awesome solution:

Font Awesome icons are designed on a grid and share a consistent height. But they vary in
width depending on how wide or narrow each symbol is. If you prefer to work with icons
that have a consistent width, adding fa-fw will render each icon using the same width.
2022-09-14 12:19:57 +01:00
albertony
67f5f04a77 build: fix lint option max-issues-per-linter 2022-09-14 12:11:54 +01:00
Øyvind Heddeland Instefjord
91f8894285 ftp: Add force_list_hidden option
Forces the use of the `LIST -a` command when listing a directory,
which should list all hidden folders and files.
2022-09-14 12:10:58 +01:00
partev
655d63b4fd docs: fix a typo: aftering -> after 2022-09-14 11:14:32 +01:00
Nick Craig-Wood
d3d843a11d fs: warn the user when using a remote name without a colon
A very common mistake for new users of rclone is to use a remote name
without a colon. This can be on the command line or in the config when
setting up a crypt backend.

This change checks to see if the user uses a path which matches a
remote name and gives a NOTICE like this if they do

    NOTICE: "remote" refers to a local folder, use "remote:" to refer to your remote or "./remote" to hide this warning

See: https://forum.rclone.org/t/sync-to-onedrive-personal-lands-file-in-localfilesystem-but-not-in-onedrive/32956
2022-09-13 18:06:19 +01:00
Nick Craig-Wood
57803bee22 build: update tidy-beta to new layout 2022-09-12 20:32:17 +01:00
Nick Craig-Wood
be53dcc9c9 docs: add more information about --track-renames
See: https://forum.rclone.org/t/feature-question-how-does-rclone-track-renames-and-moves/32911/4
2022-09-12 11:54:35 +01:00
Nick Craig-Wood
bd787e8f45 filter: Fix incorrect filtering with UseFilter context flag and wrapping backends
In this commit

8d1fff9a82 local: obey file filters in listing to fix errors on excluded files

We started using filters in the local backend so the user could short
circuit troublesome files/directories at a low level.

However this caused a number of integration tests to fail. This turned
out to be in backends wrapping the local backend. For example the
combine backend test failed because it changes the paths passed to the
local backend so they no longer match the paths in the current filter.

To fix this, a new feature flag `FilterAware` was added and the
UseFilter context flag is only passed to backends which support it. As
the wrapping backends don't support the flag, this fixes the problems
in the integration tests.

In future the wrapping backends could modify the active filters to
match the path modifications and then they could set the FilterAware
flag.

See #6376
2022-09-05 16:19:50 +01:00
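A rough sketch of how a feature flag like this gates behaviour; the types and fields below are invented for illustration and are not rclone's actual Features or filter APIs.

    package main

    import "fmt"

    // features is a stand-in for a backend's optional-feature flags.
    type features struct {
        FilterAware bool
    }

    // backend is a stand-in for a wrapped or unwrapped backend.
    type backend struct {
        name     string
        features features
    }

    // appliesFilter reports whether listing should honour the active
    // filter, only enabling it for backends that declare FilterAware.
    func appliesFilter(b backend, useFilter bool) bool {
        return useFilter && b.features.FilterAware
    }

    func main() {
        local := backend{name: "local", features: features{FilterAware: true}}
        combine := backend{name: "combine"}
        fmt.Println("local applies filter:", appliesFilter(local, true))
        fmt.Println("combine applies filter:", appliesFilter(combine, true))
    }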
Nick Craig-Wood
3cb7734eac config: move locking to fix fatal error: concurrent map read and map write
Before this change we assumed that github.com/Unknwon/goconfig was
threadsafe as documented.

However it turns out it is not threadsafe and looking at the code it
appears that making it threadsafe might be quite hard.

So this change increases the lock coverage in configfile to cover the
goconfig uses also.

Fixes #6378
2022-09-05 12:11:06 +01:00
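The shape of the fix, as a generic sketch rather than the configfile code itself: every read and write of the non-threadsafe structure goes through one mutex, so concurrent callers can no longer trip the runtime's concurrent map access check.

    package main

    import (
        "fmt"
        "sync"
    )

    // store wraps a non-threadsafe map (standing in for the goconfig
    // data) and serialises every read and write through a single mutex.
    type store struct {
        mu   sync.Mutex
        data map[string]string
    }

    func (s *store) Set(key, value string) {
        s.mu.Lock()
        defer s.mu.Unlock()
        s.data[key] = value
    }

    func (s *store) Get(key string) string {
        s.mu.Lock()
        defer s.mu.Unlock()
        return s.data[key]
    }

    func main() {
        s := &store{data: make(map[string]string)}
        var wg sync.WaitGroup
        // Concurrent writers and readers no longer trigger
        // "fatal error: concurrent map read and map write".
        for i := 0; i < 10; i++ {
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                s.Set("key", fmt.Sprint(i))
                _ = s.Get("key")
            }(i)
        }
        wg.Wait()
        fmt.Println("final:", s.Get("key"))
    }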
Nick Craig-Wood
d08ed7d1e9 ftp: add notes on how to avoid deadlocks with concurrency - fixes #6370 2022-09-05 12:11:06 +01:00
Nick Craig-Wood
f279e4ab01 Add Josh Soref to contributors 2022-09-05 12:10:59 +01:00
albertony
35349657cd docs/sftp: document use of chunk_size option in sftp remote paired with serve sftp
Related to 0008cb4934
2022-08-31 00:04:04 +02:00
Josh Soref
ce3b65e6dc all: fix spelling across the project
* abcdefghijklmnopqrstuvwxyz
* accounting
* additional
* allowed
* almost
* already
* appropriately
* arise
* bandwidth
* behave
* bidirectional
* brackets
* cached
* characters
* cloud
* committing
* concatenating
* configured
* constructs
* current
* cutoff
* deferred
* different
* directory
* disposition
* dropbox
* either way
* error
* excess
* experiments
* explicitly
* externally
* files
* github
* gzipped
* hierarchies
* huffman
* hyphen
* implicitly
* independent
* insensitive
* integrity
* libraries
* literally
* metadata
* mimics
* missing
* modification
* multipart
* multiple
* nightmare
* nonexistent
* number
* obscure
* ourselves
* overridden
* potatoes
* preexisting
* priority
* received
* remote
* replacement
* represents
* reproducibility
* response
* satisfies
* sensitive
* separately
* separator
* specifying
* string
* successful
* synchronization
* syncing
* šenfeld
* take
* temporarily
* testcontents
* that
* the
* themselves
* throttling
* timeout
* transaction
* transferred
* unnecessary
* using
* webbrowser
* which
* with
* workspace

Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com>
2022-08-30 11:16:26 +02:00
albertony
0008cb4934 docs: document that serve sftp uses chunk size 32 KiB
See #6404
2022-08-30 10:57:25 +02:00
albertony
2ea5b4f0b8 Add YFdyh000 to contributors 2022-08-30 10:26:03 +02:00
YFdyh000
b5818454f7 onedrive: cleanup brand name 2022-08-30 10:23:29 +02:00
albertony
555def2da7 build: add package comments to silence revive linter 2022-08-28 13:43:51 +02:00
albertony
02b7613104 docs/jottacloud: improve description of the standard authentication 2022-08-28 10:31:39 +02:00
albertony
b342c6cf9c docs/ftp: improve documentation of anonymous ftp - fixes #5650 2022-08-28 10:22:29 +02:00
albertony
8a6857c295 Add Simon Bos to contributors 2022-08-28 10:19:39 +02:00
albertony
21fd13f10d Add Ryan Morey to contributors 2022-08-28 10:18:33 +02:00
albertony
5cc7797f9e Add anonion to contributors 2022-08-28 10:18:14 +02:00
albertony
8bf2d6b6c8 Add João Henrique Franco to contributors 2022-08-28 10:16:37 +02:00
João Henrique Franco
85eb9776bd crypt: fix typo in comment
strign -> string
2022-08-22 10:43:54 +02:00
anonion
47539ec0e6 docs: fix minor typo in onedrive docs 2022-08-21 22:09:33 +02:00
Ryan Morey
58b327a9f6 docs: fix typo in filter pattern example 2022-08-18 21:14:26 +02:00
Simon Bos
1107da7247 dlna: specify SSDP interface names from command line 2022-08-13 12:06:03 +01:00
Nick Craig-Wood
8d1fff9a82 local: obey file filters in listing to fix errors on excluded files
Fixes #6376
2022-08-11 12:23:06 +01:00
Nick Craig-Wood
2c5923ab1a filter: make sure we check --files-from when looking for a single file 2022-08-11 12:20:17 +01:00
Nick Craig-Wood
1ad22b8881 gcs: add --gcs-endpoint flag and config parameter
See: https://forum.rclone.org/t/how-to-modify-google-cloud-storage-endpoint-uri/32342
2022-08-09 17:33:21 +01:00
Nick Craig-Wood
0501773db1 azureblob,b2,s3: fix chunksize calculations producing too many parts
Before this fix, the chunksize calculator was using the previous size
of the object, not the new size of the object to calculate the chunk
sizes.

This meant that uploading a replacement object which needed a new
chunk size would fail, using too many parts.

This fix changes the calculator to take the size explicitly.
2022-08-09 12:57:38 +01:00
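The arithmetic behind the fix, with hypothetical numbers: the chunk size must be derived from the size of the object actually being uploaded, growing until the part count fits the backend's limit. This sketch mirrors the idea, not the real calculator in fs/chunksize.

    package main

    import "fmt"

    // chunkSizeFor returns a chunk size, starting from defaultChunk and
    // doubling, that keeps an object of the given size within maxParts
    // parts.
    func chunkSizeFor(size, maxParts, defaultChunk int64) int64 {
        chunk := defaultChunk
        for (size+chunk-1)/chunk > maxParts {
            chunk *= 2
        }
        return chunk
    }

    func main() {
        const MiB = 1 << 20
        // A hypothetical 200 GiB replacement object with a 10,000 part
        // limit needs a 40 MiB chunk, however big the previous version
        // of the object was, which is why the size is passed explicitly.
        fmt.Println(chunkSizeFor(200<<30, 10000, 5*MiB)/MiB, "MiB")
    }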
Nick Craig-Wood
cb8842941b Add Mark Trolley to contributors 2022-08-09 12:57:38 +01:00
Mark Trolley
5439a2c5c6 docs: fix script installation command on downloads page
Script installation instructions in `downloads.md` differ from those in
`install.md` and fail on macOS.
2022-08-09 11:58:04 +01:00
Nick Craig-Wood
d347ac0154 local: disable xattr support if the filesystems indicates it is not supported
Before this change, if rclone was run with `-M` on a filesystem
without xattr support, it would error out.

This patch makes rclone detect the not supported errors and disable
xattrs from then on. It prints one ERROR level message about this.

See: https://forum.rclone.org/t/metadata-update-local-s3/32277/7
2022-08-09 09:27:56 +01:00
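A sketch of the detect-once-and-disable pattern described above; the sentinel error and types below are stand-ins, not rclone's real xattr code or its actual "not supported" check.

    package main

    import (
        "errors"
        "fmt"
        "sync/atomic"
    )

    // errNotSupported stands in for the ENOTSUP/EOPNOTSUPP errors the
    // real code detects from the filesystem.
    var errNotSupported = errors.New("operation not supported")

    // xattrReader remembers, after the first failure, that xattrs are
    // unsupported and skips them from then on, logging only once.
    type xattrReader struct {
        unsupported atomic.Bool
    }

    func (x *xattrReader) read(path string) (map[string]string, error) {
        if x.unsupported.Load() {
            return nil, nil // silently skip from now on
        }
        _, err := fakeListXattr(path)
        if errors.Is(err, errNotSupported) {
            x.unsupported.Store(true)
            fmt.Println("ERROR: xattrs not supported, disabling:", err)
            return nil, nil
        }
        return nil, err
    }

    // fakeListXattr simulates a filesystem without xattr support.
    func fakeListXattr(path string) ([]string, error) {
        return nil, errNotSupported
    }

    func main() {
        var x xattrReader
        // Only the first call logs; the rest skip xattrs quietly.
        for i := 0; i < 3; i++ {
            _, _ = x.read("/some/file")
        }
    }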
Nick Craig-Wood
9f33eb2e65 Changelog updates from Version 1.59.1 2022-08-08 19:01:11 +01:00
Nick Craig-Wood
fe801b8fef Add Joram Schrijver to contributors 2022-08-08 19:01:11 +01:00
albertony
6b158f33a3 serve sftp: document legacy code for checksum detection
See #6351
2022-08-06 20:46:38 +02:00
Joram Schrijver
5a6d233924 dlna: fix SOAP action header parsing - fixes #6354
Changes in github.com/anacrolix/dms changed upnp.ServiceURN to include a
namespace identifier. This identifier was previously hardcoded, but is
now parsed out of the URN. The old SOAP action header parsing logic was
duplicated in rclone and did not handle this field. Resulting responses
included a URN with an empty namespace identifier, breaking clients.
2022-08-06 17:23:37 +01:00
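For background (illustrative only, not the anacrolix/dms or rclone parser): a SOAPACTION header such as "urn:schemas-upnp-org:service:ContentDirectory:1#Browse" carries the namespace identifier ("schemas-upnp-org") inside the URN, so a parser has to keep that field rather than assume a hardcoded value.

    package main

    import (
        "fmt"
        "strings"
    )

    // soapAction holds the pieces of a SOAPACTION header value like
    // "urn:schemas-upnp-org:service:ContentDirectory:1#Browse".
    type soapAction struct {
        Auth    string // namespace identifier, e.g. "schemas-upnp-org"
        Type    string // service type, e.g. "ContentDirectory"
        Version string // service version, e.g. "1"
        Action  string // action name, e.g. "Browse"
    }

    // parseSOAPAction splits the header into URN and action and keeps
    // the namespace identifier instead of assuming a fixed one.
    func parseSOAPAction(header string) (soapAction, error) {
        header = strings.Trim(header, `"`)
        urn, action, ok := strings.Cut(header, "#")
        if !ok {
            return soapAction{}, fmt.Errorf("missing '#' in %q", header)
        }
        parts := strings.Split(urn, ":")
        if len(parts) != 5 || parts[0] != "urn" || parts[2] != "service" {
            return soapAction{}, fmt.Errorf("malformed service URN %q", urn)
        }
        return soapAction{Auth: parts[1], Type: parts[3], Version: parts[4], Action: action}, nil
    }

    func main() {
        a, err := parseSOAPAction(`"urn:schemas-upnp-org:service:ContentDirectory:1#Browse"`)
        fmt.Println(a, err)
    }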
699 changed files with 60004 additions and 17487 deletions

.github/FUNDING.yml (vendored): 4 changed lines

@@ -1,4 +0,0 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]

.github/dependabot.yml (vendored, new file): 6 lines

@@ -0,0 +1,6 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"


@@ -8,29 +8,31 @@ name: build
on:
push:
branches:
- '*'
- '**'
tags:
- '*'
- '**'
pull_request:
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
required: true
default: true
jobs:
build:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
include:
- job_name: linux
os: ubuntu-latest
go: '1.19.x'
go: '1.21.0-rc.3'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -41,14 +43,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '1.19.x'
go: '1.21.0-rc.3'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-11
go: '1.19.x'
go: '1.21.0-rc.3'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -57,14 +59,14 @@ jobs:
- job_name: mac_arm64
os: macos-11
go: '1.19.x'
go: '1.21.0-rc.3'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '1.19.x'
go: '1.21.0-rc.3'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -74,20 +76,20 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.19.x'
go: '1.21.0-rc.3'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.17
- job_name: go1.19
os: ubuntu-latest
go: '1.17.x'
go: '1.19'
quicktest: true
racequicktest: true
- job_name: go1.18
- job_name: go1.20
os: ubuntu-latest
go: '1.18.x'
go: '1.20'
quicktest: true
racequicktest: true
@@ -97,14 +99,13 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v2
uses: actions/setup-go@v4
with:
stable: 'false'
go-version: ${{ matrix.go }}
check-latest: true
@@ -123,12 +124,17 @@ jobs:
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse libfuse-dev rpm pkg-config
sudo apt-get install fuse3 libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
shell: bash
run: |
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
unset HOMEBREW_NO_INSTALL_FROM_API
brew untap --force homebrew/core
brew untap --force homebrew/cask
brew update
brew install --cask macfuse
if: matrix.os == 'macos-11'
@@ -162,7 +168,7 @@ jobs:
env
- name: Go module cache
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -216,17 +222,17 @@ jobs:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
# Deploy binaries if enabled in config && not a PR && not a fork
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Code quality test
uses: golangci/golangci-lint-action@v3
@@ -234,26 +240,39 @@ jobs:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '1.21.0-rc.3'
check-latest: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: Scan for vulnerabilities
run: govulncheck ./...
android:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v1
uses: actions/setup-go@v4
with:
go-version: 1.19.x
go-version: '1.21.0-rc.3'
- name: Go module cache
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -338,4 +357,4 @@ jobs:
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'


@@ -0,0 +1,61 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v4
with:
images: ghcr.io/${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user who created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
# `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
# GitHub Actions at the start of a workflow run to identify the job.
# This is used to authenticate against GitHub Container Registry.
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
# for more detailed information.
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and publish image
uses: docker/build-push-action@v4
with:
file: Dockerfile
context: .
push: true # push the image to ghcr
tags: |
ghcr.io/rclone/rclone:beta
rclone/rclone:beta
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
cache-from: type=gha
cache-to: type=gha,mode=max
provenance: false
# Eventually cache will need to be cleared if builds more frequent than once a week
# https://github.com/docker/build-push-action/issues/252


@@ -1,26 +0,0 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}


@@ -11,7 +11,7 @@ jobs:
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Get actual patch version
@@ -40,7 +40,7 @@ jobs:
name: Build docker plugin job
steps:
- name: Checkout master
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and publish docker plugin

.github/workflows/winget.yml (vendored, new file): 14 lines

@@ -0,0 +1,14 @@
name: Publish to Winget
on:
release:
types: [released]
jobs:
publish:
runs-on: windows-latest # Action can only run on Windows
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:
identifier: Rclone.Rclone
installers-regex: '-windows-\w+\.zip$'
token: ${{ secrets.WINGET_TOKEN }}


@@ -2,15 +2,17 @@
linters:
enable:
- deadcode
- errcheck
- goimports
- revive
- ineffassign
- structcheck
- varcheck
- govet
- unconvert
- staticcheck
- gosimple
- stylecheck
- unused
- misspell
#- prealloc
#- maligned
disable-all: true
@@ -20,11 +22,35 @@ issues:
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-per-linter: 0
max-issues-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
exclude-rules:
- linters:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m
linters-settings:
revive:
rules:
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: empty-block
disabled: true
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]


@@ -77,7 +77,7 @@ Make sure you
* Add [documentation](#writing-documentation) for a new feature.
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
When you are done with that push your changes to Github:
When you are done with that push your changes to GitHub:
git push -u origin my-new-feature
@@ -88,7 +88,7 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
## Using Git and Github ##
## Using Git and GitHub ##
### Committing your changes ###
@@ -419,7 +419,7 @@ remote or an fs.
Research
* Look at the interfaces defined in `fs/fs.go`
* Look at the interfaces defined in `fs/types.go`
* Study one or more of the existing remotes
Getting going


@@ -11,7 +11,7 @@ RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN apk --no-cache add ca-certificates fuse tzdata && \
RUN apk --no-cache add ca-certificates fuse3 tzdata && \
echo "user_allow_other" >> /etc/fuse.conf
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/


@@ -16,6 +16,9 @@ Current active maintainers of rclone are:
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
**This is a work in progress Draft**

MANUAL.html (generated): 7712 changed lines, file diff suppressed because it is too large

MANUAL.md (generated): 8168 changed lines, file diff suppressed because it is too large

MANUAL.txt (generated): 8558 changed lines, file diff suppressed because it is too large


@@ -81,6 +81,9 @@ quicktest:
racequicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
compiletest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
# Do source code quality checks
check: rclone
@echo "-- START CODE QUALITY REPORT -------------------------------"
@@ -93,7 +96,7 @@ build_dep:
# Get the release dependencies we only install on linux
release_dep_linux:
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
# Get the release dependencies we only install on Windows
release_dep_windows:


@@ -25,18 +25,19 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
@@ -45,11 +46,13 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
@@ -59,23 +62,30 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)


@@ -10,7 +10,7 @@ This file describes how to make the various kinds of releases
## Making a release
* git checkout master # see below for stable branch
* git pull
* git pull # IMPORTANT
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
* make test # see integration test server or run locally
@@ -21,6 +21,7 @@ This file describes how to make the various kinds of releases
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push origin # without --follow-tags so it doesn't push the tag if it fails
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
@@ -53,6 +54,14 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
## Tidy beta
At some point after the release run
bin/tidy-beta v1.55
where the version number is that of a couple ago to remove old beta binaries.
## Making a point release
If rclone needs a point release due to some horrendous bug:
@@ -66,8 +75,7 @@ Set vars
First make the release branch. If this is a second point release then
this will be done already.
* git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
* make startstable
Now


@@ -1 +1 @@
v1.60.0
v1.64.0


@@ -1,3 +1,4 @@
// Package alias implements a virtual provider to rename existing remotes.
package alias
import (


@@ -1,3 +1,4 @@
// Package all imports all the backends
package all
import (
@@ -23,7 +24,6 @@ import (
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
@@ -34,8 +34,11 @@ import (
_ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"
@@ -43,6 +46,7 @@ import (
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/smb"
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"

File diff suppressed because it is too large


@@ -6,10 +6,10 @@
package azureblob
import (
"context"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
@@ -17,10 +17,31 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{},
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
})
}
// TestIntegration2 runs integration tests against the remote
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestAzureBlob:"
fstests.Run(t, &fstests.Opt{
RemoteName: name,
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}
@@ -32,36 +53,6 @@ var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
)
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
func TestServicePrincipalFileSuccess(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"password": "my secret",
"tenant": "my active directory tenant ID"
}
`
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
if assert.NoError(t, err) {
assert.NotNil(t, tokenRefresher)
}
}
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
func TestServicePrincipalFileFailure(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"tenant": "my active directory tenant ID"
}
`
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}
func TestValidateAccessTier(t *testing.T) {
tests := map[string]struct {
accessTier string


@@ -1,137 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)
// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string
type msiIdentifierType int
const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)
type userMSI struct {
Type msiIdentifierType
Value string
}
type httpError struct {
Response *http.Response
}
func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)
// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()
// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")
// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}
// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, fmt.Errorf("couldn't read IMDS response: %w", err)
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err)
}
return result, nil
}


@@ -1,118 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}
func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)
// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}
for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}
// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed")
}
}
}
func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}
func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}


@@ -1,3 +1,4 @@
// Package api provides types used by the Backblaze B2 API.
package api
import (
@@ -238,7 +239,7 @@ type GetFileInfoRequest struct {
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
// 10 number number of milliseconds since midnight, January 1, 1970
// 10 number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the


@@ -1,4 +1,4 @@
// Package b2 provides an interface to the Backblaze B2 object storage system
// Package b2 provides an interface to the Backblaze B2 object storage system.
package b2
// FIXME should we remove sha1 checks from here as rclone now supports
@@ -75,13 +75,15 @@ func init() {
Description: "Backblaze B2",
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID.",
Required: true,
Name: "account",
Help: "Account ID or Application Key ID.",
Required: true,
Sensitive: true,
}, {
Name: "key",
Help: "Application Key.",
Required: true,
Name: "key",
Help: "Application Key.",
Required: true,
Sensitive: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -1221,7 +1223,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
fs.Errorf(object.Name, "Can't create object %v", err)
continue
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err)
tr.Done(ctx, err)
@@ -1235,7 +1237,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
if err != nil {
fs.Errorf(object, "Can't create object %+v", err)
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
if oldOnly && last != remote {
// Check current version of the file
if object.Action == "hide" {


@@ -14,6 +14,7 @@ import (
"io"
"strings"
"sync"
"time"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
@@ -21,6 +22,7 @@ import (
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
)
@@ -97,7 +99,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(src, maxParts, defaultChunkSize)
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
@@ -428,18 +430,47 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
uploadPool *pool.Pool
ci = fs.GetConfig(ctx)
)
// If using large chunk size then make a temporary pool
if up.chunkSize <= int64(up.f.opt.ChunkSize) {
uploadPool = up.f.pool
} else {
uploadPool = pool.New(
time.Duration(up.f.opt.MemoryPoolFlushTime),
int(up.chunkSize),
ci.Transfers,
up.f.opt.MemoryPoolUseMmap,
)
defer uploadPool.Flush()
}
// Get an upload token and a buffer
getBuf := func() (buf []byte) {
up.f.getBuf(true)
if !up.doCopy {
buf = uploadPool.Get()
}
return buf
}
// Put an upload token and a buffer
putBuf := func(buf []byte) {
if !up.doCopy {
uploadPool.Put(buf)
}
up.f.putBuf(nil, true)
}
g.Go(func() error {
for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := up.f.getBuf(up.doCopy)
buf := getBuf()
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, up.doCopy)
putBuf(buf)
return nil
}
@@ -453,14 +484,14 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putBuf(buf, up.doCopy)
putBuf(buf)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, up.doCopy)
defer putBuf(buf)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {


@@ -14,7 +14,7 @@ const (
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents represents date and time information for the
// Time represents date and time information for the
// box API, by using RFC3339
type Time time.Time


@@ -17,9 +17,9 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
@@ -27,6 +27,7 @@ import (
"sync/atomic"
"time"
"github.com/golang-jwt/jwt/v4"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -45,7 +46,6 @@ import (
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
)
const (
@@ -76,6 +76,11 @@ var (
}
)
type boxCustomClaims struct {
jwt.StandardClaims
BoxSubType string `json:"box_sub_type,omitempty"`
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -102,16 +107,18 @@ func init() {
return nil, nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
Advanced: true,
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
Advanced: true,
Sensitive: true,
}, {
Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
Sensitive: true,
}, {
Name: "box_sub_type",
Default: "user",
@@ -178,12 +185,12 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(ctx)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client)
return err
}
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
file, err := os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}
@@ -194,34 +201,31 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
return boxConfig, nil
}
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
}
claims = &jws.ClaimSet{
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
"aud": tokenURL,
"jti": val,
claims = &boxCustomClaims{
//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
StandardClaims: jwt.StandardClaims{
Id: val,
Issuer: boxConfig.BoxAppSettings.ClientID,
Subject: boxConfig.EnterpriseID,
Audience: tokenURL,
ExpiresAt: time.Now().Add(time.Second * 45).Unix(),
},
BoxSubType: boxSubType,
}
return claims, nil
}
func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
signingHeaders := &jws.Header{
Algorithm: "RS256",
Typ: "JWT",
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
signingHeaders := map[string]interface{}{
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
}
return signingHeaders
}
@@ -266,7 +270,7 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry


@@ -1,6 +1,7 @@
//go:build !plan9 && !js
// +build !plan9,!js
// Package cache implements a virtual provider to cache existing remotes.
package cache
import (
@@ -75,17 +76,19 @@ func init() {
Name: "plex_url",
Help: "The URL of the Plex server.",
}, {
Name: "plex_username",
Help: "The username of the Plex user.",
Name: "plex_username",
Help: "The username of the Plex user.",
Sensitive: true,
}, {
Name: "plex_password",
Help: "The password of the Plex user.",
IsPassword: true,
}, {
Name: "plex_token",
Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth,
Advanced: true,
Name: "plex_token",
Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth,
Advanced: true,
Sensitive: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server.",
@@ -1037,7 +1040,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
fs.Debugf(dir, "list: remove entry: %v", entryRemote)
}
entries = nil
entries = nil //nolint:ineffassign
// and then iterate over the ones from source (temp Objects will override source ones)
var batchDirectories []*Directory
@@ -1786,7 +1789,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
}
}
// StopBackgroundRunners will signall all the runners to stop their work
// StopBackgroundRunners will signal all the runners to stop their work
// can be triggered from a terminate signal or from testing between runs
func (f *Fs) StopBackgroundRunners() {
f.cleanupChan <- false


@@ -11,7 +11,6 @@ import (
goflag "flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
@@ -102,14 +101,12 @@ func TestMain(m *testing.M) {
func TestInternalListRootAndInnerRemotes(t *testing.T) {
id := fmt.Sprintf("tilrair%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
// Instantiate inner fs
innerFolder := "inner"
runInstance.mkdir(t, rootFs, innerFolder)
rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs2, boltDb2)
rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)
runInstance.writeObjectString(t, rootFs2, "one", "content")
listRoot, err := runInstance.list(t, rootFs, "")
@@ -167,7 +164,7 @@ func TestInternalVfsCache(t *testing.T) {
li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
for _, r := range li2 {
var err error
ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
if err != nil || len(ci) == 0 {
log.Printf("========== '%v' not in cache", r)
} else {
@@ -226,8 +223,7 @@ func TestInternalVfsCache(t *testing.T) {
func TestInternalObjWrapFsFound(t *testing.T) {
id := fmt.Sprintf("tiowff%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -259,8 +255,7 @@ func TestInternalObjWrapFsFound(t *testing.T) {
func TestInternalObjNotFound(t *testing.T) {
id := fmt.Sprintf("tionf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
obj, err := rootFs.NewObject(context.Background(), "404")
require.Error(t, err)
@@ -270,8 +265,7 @@ func TestInternalObjNotFound(t *testing.T) {
func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -298,8 +292,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
// write the object
runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -317,8 +310,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticucm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
var err error
// create some rand test data
@@ -347,8 +339,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
}
@@ -378,8 +369,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
func TestInternalLargeWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
}
@@ -405,8 +395,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -460,8 +449,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
func TestInternalMoveWithNotify(t *testing.T) {
id := fmt.Sprintf("timwn%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
@@ -547,8 +535,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
id := fmt.Sprintf("tincep%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
@@ -634,8 +621,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -667,8 +653,7 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
func TestInternalCacheWrites(t *testing.T) {
id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -689,8 +674,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -725,8 +709,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
func TestInternalExpiredEntriesRemoved(t *testing.T) {
id := fmt.Sprintf("tieer%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -763,9 +746,7 @@ func TestInternalBug2117(t *testing.T) {
vfsflags.Opt.DirCacheTime = time.Second * 10
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
if runInstance.rootIsCrypt {
t.Skipf("skipping crypt")
@@ -841,7 +822,7 @@ func newRun() *run {
}
if uploadDir == "" {
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
if err != nil {
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
}
@@ -866,7 +847,7 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
return enc
}
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
remoteExists := false
for _, s := range config.FileSections() {
@@ -959,10 +940,15 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
t.Cleanup(func() {
runInstance.cleanupFs(t, f)
})
return f, boltDb
}
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
err := f.Features().Purge(context.Background(), "")
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
@@ -984,7 +970,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
chunk := int64(1024)
cnt := size / chunk
left := size % chunk
f, err := ioutil.TempFile("", "rclonecache-tempfile")
f, err := os.CreateTemp("", "rclonecache-tempfile")
require.NoError(t, err)
for i := 0; i < int(cnt); i++ {
@@ -1112,27 +1098,6 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
return l, err
}
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer func() {
_ = in.Close()
}()
out, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
_ = out.Close()
}()
_, err = io.Copy(out, in)
return err
}
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error

View File

@@ -21,10 +21,8 @@ import (
func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
runInstance.newCacheFs(t, remoteName, id, false, true,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
@@ -63,9 +61,7 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
@@ -73,19 +69,15 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err)
@@ -119,10 +111,8 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err)
@@ -162,21 +152,19 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
totalFiles := 10
rand.Seed(time.Now().Unix())
randInstance := rand.New(rand.NewSource(time.Now().Unix()))
lastFile := ""
for i := 0; i < totalFiles; i++ {
size := int64(rand.Intn(maxSize-minSize) + minSize)
size := int64(randInstance.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
@@ -213,9 +201,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
@@ -343,9 +329,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()

View File

@@ -8,7 +8,7 @@ import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/url"
"strings"
@@ -167,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
continue
}
var data []byte
data, err = ioutil.ReadAll(resp.Body)
data, err = io.ReadAll(resp.Body)
if err != nil {
continue
}

View File

@@ -9,7 +9,6 @@ import (
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
@@ -473,7 +472,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
var data []byte
fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
data, err := ioutil.ReadFile(fp)
data, err := os.ReadFile(fp)
if err != nil {
return nil, err
}
@@ -486,7 +485,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)
filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
err := ioutil.WriteFile(filePath, data, os.ModePerm)
err := os.WriteFile(filePath, data, os.ModePerm)
if err != nil {
return err
}

View File

@@ -12,7 +12,6 @@ import (
"fmt"
gohash "hash"
"io"
"io/ioutil"
"math/rand"
"path"
"regexp"
@@ -64,7 +63,7 @@ import (
// length of 13 decimals it makes a 7-digit base-36 number.
//
// When transactions is set to the norename style, data chunks will
// keep their temporary chunk names (with the transacion identifier
// keep their temporary chunk names (with the transaction identifier
// suffix). To distinguish them from temporary chunks, the txn field
// of the metadata file is set to match the transaction identifier of
// the data chunks.
@@ -1038,7 +1037,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
if err != nil {
return err
}
metadata, err := ioutil.ReadAll(reader)
metadata, err := io.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return err
@@ -1079,7 +1078,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
// readXactID returns the transaction ID stored in the passed metadata object
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// if xactID has already been read and cahced return it now
// if xactID has already been read and cached return it now
if o.xIDCached {
return o.xactID, nil
}
@@ -1097,7 +1096,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
if err != nil {
return "", err
}
data, err := ioutil.ReadAll(reader)
data, err := io.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return "", err

View File

@@ -5,7 +5,7 @@ import (
"context"
"flag"
"fmt"
"io/ioutil"
"io"
"path"
"regexp"
"strings"
@@ -413,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
if r == nil {
return
}
data, err := ioutil.ReadAll(r)
data, err := io.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
@@ -538,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
assert.NoError(t, err)
var chunkContents []byte
assert.NotPanics(t, func() {
chunkContents, err = ioutil.ReadAll(r)
chunkContents, err = io.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
@@ -573,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
r, err = willyChunk.Open(ctx)
assert.NoError(t, err)
assert.NotPanics(t, func() {
_, err = ioutil.ReadAll(r)
_, err = io.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
@@ -672,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil {
data, err := ioutil.ReadAll(r)
data, err := io.ReadAll(r)
assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close()
@@ -758,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
assert.Error(t, err)
// Rcat must fail
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime)
in := io.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
assert.Nil(t, robj)
assert.NotNil(t, err)
if err != nil {
@@ -854,7 +854,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
r, err := dstFile.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
data, err := ioutil.ReadAll(r)
data, err := io.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()

View File

@@ -1,4 +1,4 @@
// Package combine implents a backend to combine multipe remotes in a directory tree
// Package combine implements a backend to combine multiple remotes in a directory tree
package combine
/*
@@ -233,6 +233,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f)
canMove := true
for _, u := range f.upstreams {
@@ -289,6 +290,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}
// Enable CleanUp when any upstreams support it
if features.CleanUp == nil {
for _, u := range f.upstreams {
if u.f.Features().CleanUp != nil {
features.CleanUp = f.CleanUp
break
}
}
}
// Enable ChangeNotify when any upstreams support it
if features.ChangeNotify == nil {
for _, u := range f.upstreams {
@@ -299,6 +310,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}
// show that we wrap other backends
features.Overlay = true
f.features = features
// Get common intersection of hashes
@@ -351,7 +365,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
return g.Wait()
}
// join the elements together but unline path.Join return empty string
// join the elements together but unlike path.Join return empty string
func join(elem ...string) string {
result := path.Join(elem...)
if result == "." {
@@ -631,7 +645,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
if err != nil {
return nil, err
}
uSrc := operations.NewOverrideRemote(src, uRemote)
uSrc := fs.NewOverrideRemote(src, uRemote)
var o fs.Object
if stream {
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
@@ -887,6 +901,100 @@ func (f *Fs) Shutdown(ctx context.Context) error {
})
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return "", err
}
do := u.f.Features().PublicLink
if do == nil {
return "", fs.ErrorNotImplemented
}
return do(ctx, uRemote, expire, unlink)
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
u, uRemote, err := f.findUpstream(srcPath)
if err != nil {
return nil, err
}
do := u.f.Features().PutUnchecked
if do == nil {
return nil, fs.ErrorNotImplemented
}
uSrc := fs.NewOverrideRemote(src, uRemote)
return do(ctx, in, uSrc, options...)
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) == 0 {
return nil
}
var (
u *upstream
uDirs []fs.Directory
)
for _, dir := range dirs {
uNew, uDir, err := f.findUpstream(dir.Remote())
if err != nil {
return err
}
if u == nil {
u = uNew
} else if u != uNew {
return fmt.Errorf("can't merge directories from different upstreams")
}
uDirs = append(uDirs, fs.NewOverrideDirectory(dir, uDir))
}
do := u.f.Features().MergeDirs
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx, uDirs)
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().CleanUp; do != nil {
return do(ctx)
}
return nil
})
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := u.f.Features().OpenWriterAt
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx, uRemote, size)
}
// Object describes a wrapped Object
//
// This is a wrapped Object which knows its path prefix
@@ -916,7 +1024,7 @@ func (o *Object) String() string {
func (o *Object) Remote() string {
newPath, err := o.u.pathAdjustment.do(o.Object.String())
if err != nil {
fs.Errorf(o, "Bad object: %v", err)
fs.Errorf(o.Object, "Bad object: %v", err)
return err.Error()
}
return newPath
@@ -988,5 +1096,10 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil)
)
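
The `var _ fs.PublicLinker = (*Fs)(nil)` block above relies on Go's compile-time interface checks: assigning a typed nil to a variable of the interface type fails the build if *Fs ever stops satisfying one of the optional interfaces it now claims to implement. A minimal standalone sketch of the idiom (the interface and backend type here are hypothetical stand-ins):

package main

import "fmt"

// CleanUpper is a stand-in for an optional-feature interface such as fs.CleanUpper.
type CleanUpper interface {
	CleanUp() error
}

// Fs is a stand-in backend type.
type Fs struct{}

// CleanUp satisfies CleanUpper.
func (*Fs) CleanUp() error { return nil }

// Compile-time check: the build breaks here if *Fs stops implementing CleanUpper.
var _ CleanUpper = (*Fs)(nil)

func main() {
	fmt.Println("ok")
}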

View File

@@ -10,6 +10,11 @@ import (
"github.com/rclone/rclone/fstest/fstests"
)
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
unimplementableObjectMethods = []string{}
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
@@ -17,8 +22,8 @@ func TestIntegration(t *testing.T) {
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
@@ -35,7 +40,9 @@ func TestLocal(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
@@ -51,7 +58,9 @@ func TestMemory(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
@@ -68,6 +77,8 @@ func TestMixed(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}

View File

@@ -13,7 +13,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
"strings"
@@ -29,6 +28,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
)
@@ -90,7 +90,7 @@ Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.
Level -2 uses Huffmann encoding only. Only use if you know what you
Level -2 uses Huffman encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.`,
Default: sgzip.DefaultCompression,
@@ -130,7 +130,7 @@ type Fs struct {
features *fs.Features // optional features
}
// NewFs contstructs an Fs from the path, container:path
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -186,6 +186,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true
@@ -367,13 +368,16 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil {
return nil, err
}
meta := readMetadata(ctx, mo)
if meta == nil {
return nil, errors.New("error decoding metadata")
meta, err := readMetadata(ctx, mo)
if err != nil {
return nil, fmt.Errorf("error decoding metadata: %w", err)
}
// Create our Object
o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
return f.newObject(o, mo, meta), err
if err != nil {
return nil, err
}
return f.newObject(o, mo, meta), nil
}
// checkCompressAndType checks if an object is compressible and determines it's mime type
@@ -451,7 +455,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
}
// Need to include what we allready read
// Need to include what we already read
in = &ReadCloserWrapper{
Reader: io.MultiReader(bytes.NewReader(buf), in),
Closer: in,
@@ -464,7 +468,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
}
fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
tempFile, err := ioutil.TempFile("", "rclone-press-")
tempFile, err := os.CreateTemp("", "rclone-press-")
defer func() {
// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
// to ignore them
@@ -542,8 +546,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
}
// Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
if err != nil {
if o != nil {
removeErr := o.Remove(ctx)
@@ -677,7 +681,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
}
return nil, err
}
return f.newObject(dataObject, mo, meta), err
return f.newObject(dataObject, mo, meta), nil
}
// Put in to the remote path with the modTime given of the given size
@@ -731,7 +735,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
}
// If our new object is compressed we have to rename it with the correct size.
// Uncompressed objects don't store the size in the name so we they'll allready have the correct name.
// Uncompressed objects don't store the size in the name so they'll already have the correct name.
if compressible {
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
if err != nil {
@@ -742,7 +746,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return newObj, nil
}
// Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// will break stuff. Right now I can't think of a way to make this work.
// PutUnchecked uploads the object
@@ -1040,24 +1044,19 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
}
// This function will read the metadata from a metadata object.
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
// Open our metadata object
rc, err := mo.Open(ctx)
if err != nil {
return nil
return nil, err
}
defer func() {
err := rc.Close()
if err != nil {
fs.Errorf(mo, "Error closing object: %v", err)
}
}()
defer fs.CheckClose(rc, &err)
jr := json.NewDecoder(rc)
meta = new(ObjectMetadata)
if err = jr.Decode(meta); err != nil {
return nil
return nil, err
}
return meta
return meta, nil
}
// Remove removes this object
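
The rewritten readMetadata above now reports failures through a named error return and defers fs.CheckClose(rc, &err) so a close error is not silently dropped. A small self-contained sketch of that defer-and-capture pattern, with a local stand-in for the helper:

package main

import (
	"fmt"
	"io"
	"strings"
)

// checkClose mirrors the shape of the fs.CheckClose helper: close c and
// record the close error only if no earlier error has been set.
func checkClose(c io.Closer, err *error) {
	cerr := c.Close()
	if *err == nil {
		*err = cerr
	}
}

// readAll demonstrates the pattern with a named error return value.
func readAll(rc io.ReadCloser) (data []byte, err error) {
	defer checkClose(rc, &err)
	return io.ReadAll(rc)
}

func main() {
	data, err := readAll(io.NopCloser(strings.NewReader("{}")))
	fmt.Println(string(data), err)
}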
@@ -1102,6 +1101,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
origName := o.Remote()
if o.meta.Mode != Uncompressed || compressible {
newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
if err != nil {
return err
}
if newObject.Object.Remote() != o.Object.Remote() {
if removeErr := o.Object.Remove(ctx); removeErr != nil {
return removeErr
@@ -1115,9 +1117,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// If we are, just update the object and metadata
newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
}
if err != nil {
return err
if err != nil {
return err
}
}
// Update object metadata and return
o.Object = newObject.Object
@@ -1128,6 +1130,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
if o == nil {
log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
}
return &Object{
Object: o,
f: f,
@@ -1140,6 +1145,9 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object
// This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
if o == nil {
log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
}
return &Object{
Object: o,
f: f,
@@ -1167,7 +1175,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
return err
}
if o.meta == nil {
o.meta = readMetadata(ctx, o.mo)
o.meta, err = readMetadata(ctx, o.mo)
}
return err
}

View File

@@ -21,6 +21,7 @@ import (
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/version"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox"
@@ -37,7 +38,6 @@ const (
blockHeaderSize = secretbox.Overhead
blockDataSize = 64 * 1024
blockSize = blockHeaderSize + blockDataSize
encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
)
// Errors returned by cipher
@@ -53,8 +53,9 @@ var (
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
ErrorFileClosed = errors.New("file already closed")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix")
ErrorBadSeek = errors.New("Seek beyond end of file")
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
obfuscQuoteRune = '!'
)
@@ -131,7 +132,7 @@ type fileNameEncoding interface {
// - we strip the padding character `=`
type caseInsensitiveBase32Encoding struct{}
// EncodeToString encodes a strign using the modified version of
// EncodeToString encodes a string using the modified version of
// base32 encoding.
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
encoded := base32.HexEncoding.EncodeToString(src)
@@ -169,27 +170,30 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
passBadBlocks bool // if set, pass bad blocks through as zeroed blocks
encryptedSuffix string
}
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
c := &Cipher{
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
encryptedSuffix: ".bin",
}
c.buffers.New = func() interface{} {
return make([]byte, blockSize)
return new([blockSize]byte)
}
err := c.Key(password, salt)
if err != nil {
@@ -198,11 +202,29 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
return c, nil
}
// setEncryptedSuffix sets the suffix, or an empty string
func (c *Cipher) setEncryptedSuffix(suffix string) {
if strings.EqualFold(suffix, "none") {
c.encryptedSuffix = ""
return
}
if !strings.HasPrefix(suffix, ".") {
fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
suffix = "." + suffix
}
c.encryptedSuffix = suffix
}
// Call to set bad block pass through
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
c.passBadBlocks = passBadBlocks
}
// Key creates all the internal keys from the password passed in using
// scrypt.
//
// If salt is "" we use a fixed salt just to make attackers lives
// slighty harder than using no salt.
// slightly harder than using no salt.
//
// Note that empty password makes all 0x00 keys which is used in the
// tests.
@@ -230,15 +252,12 @@ func (c *Cipher) Key(password, salt string) (err error) {
}
// getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() []byte {
return c.buffers.Get().([]byte)
func (c *Cipher) getBlock() *[blockSize]byte {
return c.buffers.Get().(*[blockSize]byte)
}
// putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf []byte) {
if len(buf) != blockSize {
panic("bad blocksize returned to pool")
}
func (c *Cipher) putBlock(buf *[blockSize]byte) {
c.buffers.Put(buf)
}
@@ -508,7 +527,7 @@ func (c *Cipher) encryptFileName(in string) string {
// EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff {
return in + encryptedSuffix
return in + c.encryptedSuffix
}
return c.encryptFileName(in)
}
@@ -568,8 +587,8 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
// DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
remainingLength := len(in) - len(c.encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) {
return "", ErrorNotAnEncryptedFile
}
decrypted := in[:remainingLength]
@@ -609,7 +628,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
// fromReader fills the nonce from an io.Reader - normally the OSes
// crypto random number generator
func (n *nonce) fromReader(in io.Reader) error {
read, err := io.ReadFull(in, (*n)[:])
read, err := readers.ReadFill(in, (*n)[:])
if read != fileNonceSize {
return fmt.Errorf("short read of nonce: %w", err)
}
@@ -664,8 +683,8 @@ type encrypter struct {
in io.Reader
c *Cipher
nonce nonce
buf []byte
readBuf []byte
buf *[blockSize]byte
readBuf *[blockSize]byte
bufIndex int
bufSize int
err error
@@ -690,9 +709,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
}
}
// Copy magic into buffer
copy(fh.buf, fileMagicBytes)
copy((*fh.buf)[:], fileMagicBytes)
// Copy nonce into buffer
copy(fh.buf[fileMagicSize:], fh.nonce[:])
copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
return fh, nil
}
@@ -707,22 +726,20 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
if fh.bufIndex >= fh.bufSize {
// Read data
// FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := fh.readBuf[:blockDataSize]
n, err = io.ReadFull(fh.in, readBuf)
readBuf := (*fh.readBuf)[:blockDataSize]
n, err = readers.ReadFill(fh.in, readBuf)
if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return fh.finish(err)
}
// possibly err != nil here, but we will process the
// data and the next call to ReadFull will return 0, err
// data and the next call to ReadFill will return 0, err
// Encrypt the block using the nonce
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n
fh.nonce.increment()
}
n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
fh.bufIndex += n
return n, nil
}
@@ -763,8 +780,8 @@ type decrypter struct {
nonce nonce
initialNonce nonce
c *Cipher
buf []byte
readBuf []byte
buf *[blockSize]byte
readBuf *[blockSize]byte
bufIndex int
bufSize int
err error
@@ -782,12 +799,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
limit: -1,
}
// Read file header (magic + nonce)
readBuf := fh.readBuf[:fileHeaderSize]
_, err := io.ReadFull(fh.rc, readBuf)
if err == io.EOF || err == io.ErrUnexpectedEOF {
readBuf := (*fh.readBuf)[:fileHeaderSize]
n, err := readers.ReadFill(fh.rc, readBuf)
if n < fileHeaderSize && err == io.EOF {
// This read from 0..fileHeaderSize-1 bytes
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
} else if err != nil {
} else if err != io.EOF && err != nil {
return nil, fh.finishAndClose(err)
}
// check the magic
@@ -845,10 +862,8 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
func (fh *decrypter) fillBuffer() (err error) {
// FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := fh.readBuf
n, err := io.ReadFull(fh.rc, readBuf)
n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return err
}
// possibly err != nil here, but we will process the data and
@@ -856,18 +871,25 @@ func (fh *decrypter) fillBuffer() (err error) {
// Check header + 1 byte exists
if n <= blockHeaderSize {
if err != nil {
if err != nil && err != io.EOF {
return err // return pending error as it is likely more accurate
}
return ErrorEncryptedFileBadHeader
}
// Decrypt the block using the nonce
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok {
if err != nil {
if err != nil && err != io.EOF {
return err // return pending error as it is likely more accurate
}
return ErrorEncryptedBadBlock
if !fh.c.passBadBlocks {
return ErrorEncryptedBadBlock
}
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
// Zero out the bad block and continue
for i := range (*fh.buf)[:n] {
(*fh.buf)[i] = 0
}
}
fh.bufIndex = 0
fh.bufSize = n - blockHeaderSize
@@ -893,7 +915,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
if fh.limit >= 0 && fh.limit < int64(toCopy) {
toCopy = int(fh.limit)
}
n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
fh.bufIndex += n
if fh.limit >= 0 {
fh.limit -= int64(n)
@@ -904,9 +926,8 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
return n, nil
}
// calculateUnderlying converts an (offset, limit) in a crypted file
// into an (underlyingOffset, underlyingLimit) for the underlying
// file.
// calculateUnderlying converts an (offset, limit) in an encrypted file
// into an (underlyingOffset, underlyingLimit) for the underlying file.
//
// It also returns number of bytes to discard after reading the first
// block and number of blocks this is from the start so the nonce can

View File

@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
@@ -28,14 +27,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
{"off", NameEncryptionOff, ""},
{"standard", NameEncryptionStandard, ""},
{"obfuscate", NameEncryptionObfuscated, ""},
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""},
} {
actual, actualErr := NewNameEncryptionMode(test.in)
assert.Equal(t, actual, test.expected)
if test.expectedErr == "" {
assert.NoError(t, actualErr)
} else {
assert.Error(t, actualErr, test.expectedErr)
assert.EqualError(t, actualErr, test.expectedErr)
}
}
}
@@ -406,6 +405,13 @@ func TestNonStandardEncryptFileName(t *testing.T) {
// Off mode
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
// Off mode with custom suffix
c, _ = newCipher(NameEncryptionOff, "", "", true, nil)
c.setEncryptedSuffix(".jpg")
assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123"))
// Off mode with empty suffix
c.setEncryptedSuffix("none")
assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123"))
// Obfuscation mode
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
@@ -484,21 +490,27 @@ func TestNonStandardDecryptFileName(t *testing.T) {
in string
expected string
expectedErr error
customSuffix string
}{
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""},
{NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"},
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
if test.customSuffix != "" {
c.setEncryptedSuffix(test.customSuffix)
}
actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
@@ -727,7 +739,7 @@ func TestNonceFromReader(t *testing.T) {
assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
buf = bytes.NewBufferString("123456789abcdefghijklmn")
err = x.fromReader(buf)
assert.Error(t, err, "short read of nonce")
assert.EqualError(t, err, "short read of nonce: EOF")
}
func TestNonceFromBuf(t *testing.T) {
@@ -1051,7 +1063,7 @@ func TestRandomSource(t *testing.T) {
_, _ = source.Read(buf)
sink = newRandomSource(1e8)
_, err = io.Copy(sink, source)
assert.Error(t, err, "Error in stream")
assert.EqualError(t, err, "Error in stream at 1")
}
type zeroes struct{}
@@ -1073,7 +1085,7 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
source := newRandomSource(copySize)
encrypted, err := c.newEncrypter(source, nil)
assert.NoError(t, err)
decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
decrypted, err := c.newDecrypter(io.NopCloser(encrypted))
assert.NoError(t, err)
sink := newRandomSource(copySize)
n, err := io.CopyBuffer(sink, decrypted, buf)
@@ -1144,15 +1156,15 @@ func TestEncryptData(t *testing.T) {
buf := bytes.NewBuffer(test.in)
encrypted, err := c.EncryptData(buf)
assert.NoError(t, err)
out, err := ioutil.ReadAll(encrypted)
out, err := io.ReadAll(encrypted)
assert.NoError(t, err)
assert.Equal(t, test.expected, out)
// Check we can decode the data properly too...
buf = bytes.NewBuffer(out)
decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
decrypted, err := c.DecryptData(io.NopCloser(buf))
assert.NoError(t, err)
out, err = ioutil.ReadAll(decrypted)
out, err = io.ReadAll(decrypted)
assert.NoError(t, err)
assert.Equal(t, test.in, out)
}
@@ -1168,13 +1180,13 @@ func TestNewEncrypter(t *testing.T) {
fh, err := c.newEncrypter(z, nil)
assert.NoError(t, err)
assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32])
// Test error path
c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
fh, err = c.newEncrypter(z, nil)
assert.Nil(t, fh)
assert.Error(t, err, "short read of nonce")
assert.EqualError(t, err, "short read of nonce: EOF")
}
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
@@ -1187,7 +1199,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
n, err := io.CopyN(io.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n)
}
@@ -1225,7 +1237,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error())
assert.Equal(t, 1, cd.closed)
}
@@ -1233,7 +1245,7 @@ func TestNewDecrypter(t *testing.T) {
cd = newCloseDetector(er)
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
assert.Error(t, err, "potato")
assert.EqualError(t, err, "potato")
assert.Equal(t, 1, cd.closed)
// bad magic
@@ -1244,7 +1256,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0copy))
fh, err := c.newDecrypter(cd)
assert.Nil(t, fh)
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
file0copy[i] ^= 0x1
assert.Equal(t, 1, cd.closed)
}
@@ -1257,12 +1269,12 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16)
in := ioutil.NopCloser(io.MultiReader(in1, in2))
in := io.NopCloser(io.MultiReader(in1, in2))
fh, err := c.newDecrypter(in)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
n, err := io.CopyN(io.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n)
}
@@ -1274,14 +1286,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
// Make random data
const dataSize = 150000
plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
plaintext, err := io.ReadAll(newRandomSource(dataSize))
assert.NoError(t, err)
// Encrypt the data
buf := bytes.NewBuffer(plaintext)
encrypted, err := c.EncryptData(buf)
assert.NoError(t, err)
ciphertext, err := ioutil.ReadAll(encrypted)
ciphertext, err := io.ReadAll(encrypted)
assert.NoError(t, err)
trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
@@ -1300,7 +1312,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
end = len(ciphertext)
}
}
reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil
}
@@ -1490,14 +1502,16 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err, what)
continue
}
_, err = ioutil.ReadAll(fh)
_, err = io.ReadAll(fh)
var expectedErr error
switch {
case i == fileHeaderSize:
// This would normally produce an error *except* on the first block
expectedErr = nil
case i <= fileHeaderSize+blockHeaderSize:
expectedErr = ErrorEncryptedFileBadHeader
default:
expectedErr = io.ErrUnexpectedEOF
expectedErr = ErrorEncryptedBadBlock
}
if expectedErr != nil {
assert.EqualError(t, err, expectedErr.Error(), what)
@@ -1514,8 +1528,8 @@ func TestDecrypterRead(t *testing.T) {
cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd)
assert.NoError(t, err)
_, err = ioutil.ReadAll(fh)
assert.Error(t, err, "potato")
_, err = io.ReadAll(fh)
assert.EqualError(t, err, "potato")
assert.Equal(t, 0, cd.closed)
// Test corrupting the input
@@ -1524,17 +1538,28 @@ func TestDecrypterRead(t *testing.T) {
copy(file16copy, file16)
for i := range file16copy {
file16copy[i] ^= 0xFF
fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
if i < fileMagicSize {
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
assert.Nil(t, fh)
} else {
assert.NoError(t, err)
_, err = ioutil.ReadAll(fh)
assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
_, err = io.ReadAll(fh)
assert.EqualError(t, err, ErrorEncryptedBadBlock.Error())
}
file16copy[i] ^= 0xFF
}
// Test that we can corrupt a byte and read zeroes if
// passBadBlocks is set
copy(file16copy, file16)
file16copy[len(file16copy)-1] ^= 0xFF
c.passBadBlocks = true
fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
assert.NoError(t, err)
buf, err := io.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, make([]byte, 16), buf)
}
func TestDecrypterClose(t *testing.T) {
@@ -1555,7 +1580,7 @@ func TestDecrypterClose(t *testing.T) {
// double close
err = fh.Close()
assert.Error(t, err, ErrorFileClosed.Error())
assert.EqualError(t, err, ErrorFileClosed.Error())
assert.Equal(t, 1, cd.closed)
// try again reading the file this time
@@ -1565,7 +1590,7 @@ func TestDecrypterClose(t *testing.T) {
assert.Equal(t, 0, cd.closed)
// close after reading
out, err := ioutil.ReadAll(fh)
out, err := io.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, []byte{1}, out)
assert.Equal(t, io.EOF, fh.err)
@@ -1582,8 +1607,6 @@ func TestPutGetBlock(t *testing.T) {
block := c.getBlock()
c.putBlock(block)
c.putBlock(block)
assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
}
func TestKey(t *testing.T) {

View File

@@ -48,7 +48,7 @@ func init() {
Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.",
},
},
}, {
@@ -79,7 +79,9 @@ NB If filename_encryption is "off" then this option will do nothing.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
Help: `Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
@@ -119,13 +121,22 @@ names, or for debugging purposes.`,
Help: "Encrypt file data.",
},
},
}, {
Name: "pass_bad_blocks",
Help: `If set this will pass bad blocks through as all 0.
This should not be set in normal operation, it should only be set if
trying to recover an encrypted file with errors and it is desired to
recover as much of the file as possible.`,
Default: false,
Advanced: true,
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string.
This option could help with shortening the encrypted filename. The
suitable option would depend on the way your remote count the filename
length and if it's case sensitve.`,
length and if it's case sensitive.`,
Default: "base32",
Examples: []fs.OptionExample{
{
@@ -138,10 +149,18 @@ length and if it's case sensitve.`,
},
{
Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
},
},
Advanced: true,
}, {
Name: "suffix",
Help: `If this is set it will override the default suffix of ".bin".
Setting suffix to "none" will result in an empty suffix. This may be useful
when the path length is critical.`,
Default: ".bin",
Advanced: true,
}},
})
}
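
A hypothetical config stanza showing how the two options registered above might be combined — filename encryption off with a custom suffix, plus pass_bad_blocks enabled for a one-off recovery run (remote names and paths are made up, and the obscured passwords are omitted):

[recovery-crypt]
type = crypt
remote = s3:mybucket/encrypted
filename_encryption = off
suffix = .jpg
pass_bad_blocks = true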
@@ -174,6 +193,8 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err)
}
cipher.setEncryptedSuffix(opt.Suffix)
cipher.setPassBadBlocks(opt.PassBadBlocks)
return cipher, nil
}
@@ -235,7 +256,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false,
@@ -247,6 +268,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, err
@@ -262,7 +284,9 @@ type Options struct {
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
PassBadBlocks bool `config:"pass_bad_blocks"`
FilenameEncoding string `config:"filename_encoding"`
Suffix string `config:"suffix"`
}
// Fs represents a wrapped fs.Fs
@@ -396,6 +420,8 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
ci := fs.GetConfig(ctx)
if f.opt.NoDataEncryption {
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
if err == nil && o != nil {
@@ -413,6 +439,9 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// Find a hash the destination supports to compute a hash of
// the encrypted data
ht := f.Fs.Hashes().GetOne()
if ci.IgnoreChecksum {
ht = hash.None
}
var hasher *hash.MultiHasher
if ht != hash.None {
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -449,7 +478,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
@@ -1047,10 +1076,11 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
// Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Otherwise likely is an operations.OverrideRemote
} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
// Unwrap if it is an operations.OverrideRemote
srcObj = do.UnWrap()
} else {
// Otherwise don't unwrap any further
return "", nil
}
// if this is wrapping a local object then we work out the hash

View File

@@ -17,41 +17,28 @@ import (
"github.com/stretchr/testify/require"
)
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
localFs, err := fs.TemporaryLocalFs(context.Background())
require.NoError(t, err)
cleanup = func() {
t.Cleanup(func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
}
return localFs, cleanup
})
return localFs
}
// Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err)
cleanup = func() {
t.Cleanup(func() {
require.NoError(t, obj.Remove(context.Background()))
}
return obj, cleanup
})
return obj
}
// Test the ObjectInfo
@@ -65,11 +52,9 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
path = "_wrap"
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
localFs := makeTempLocalFs(t)
obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
obj := uploadFile(t, localFs, path, contents)
// encrypt the data
inBuf := bytes.NewBufferString(contents)
@@ -83,7 +68,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var oi fs.ObjectInfo = obj
if wrap {
// wrap the object in an fs.ObjectUnwrapper if required
oi = testWrapper{oi}
oi = fs.NewOverrideRemote(oi, "new_remote")
}
// wrap the object in a crypt for upload using the nonce we
@@ -116,16 +101,13 @@ func testComputeHash(t *testing.T, f *Fs) {
t.Skipf("%v: does not support hashes", f.Fs)
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
localFs := makeTempLocalFs(t)
// Upload a file to localFs as a test object
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
localObj := uploadFile(t, localFs, path, contents)
// Upload the same data to the remote Fs also
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
remoteObj := uploadFile(t, f, path, contents)
// Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)

View File

@@ -14,11 +14,10 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http"
"os"
"path"
"regexp"
"sort"
"strconv"
"strings"
@@ -203,7 +202,7 @@ func init() {
m.Set("root_folder_id", "appDataFolder")
}
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth {
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
OAuth2Config: driveConfig,
})
@@ -278,20 +277,23 @@ Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.
`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Sensitive: true,
}, {
Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Sensitive: true,
}, {
Name: "auth_owner_only",
Default: false,
@@ -417,10 +419,11 @@ date is used.`,
Help: "Size of listing chunk 100-1000, 0 to disable.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.`,
Advanced: true,
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.`,
Advanced: true,
Sensitive: true,
}, {
Name: "alternate_export",
Default: false,
@@ -452,7 +455,11 @@ If downloading a file returns the error "This file has been identified
as malware or spam and cannot be downloaded" with the error code
"cannotDownloadAbusiveFile" then supply this flag to rclone to
indicate you acknowledge the risks of downloading the file and rclone
will download it anyway.`,
will download it anyway.
Note that if you are using service account it will need Manager
permission (not Content Manager) to for this flag to work. If the SA
does not have the right permission, Google will just ignore the flag.`,
Advanced: true,
}, {
Name: "keep_revision_forever",
@@ -496,7 +503,9 @@ need to use --ignore size also.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server-side operations (e.g. copy) to work across different drive configs.
Help: `Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different drive configs.
This can be useful if you wish to do a server-side copy between two
different Google drives. Note that this isn't enabled by default
@@ -587,7 +596,8 @@ Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the
resource key is no needed.
`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -595,6 +605,18 @@ resource key is no needed.
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// Don't encode / as it's a valid name character in drive.
Default: encoder.EncodeInvalidUtf8,
}, {
Name: "env_auth",
Help: "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...),
})
@@ -651,6 +673,7 @@ type Options struct {
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
}
// Fs represents a remote drive server
@@ -758,7 +781,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
fs.Errorf(f, "Received download limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
fs.Errorf(f, "Received upload limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
@@ -1108,7 +1131,7 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
// try loading service account credentials from env variable, then from a file
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
@@ -1119,6 +1142,12 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
if err != nil {
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
}
} else if opt.EnvAuth {
scopes := driveScopes(opt.Scope)
oAuthClient, err = google.DefaultClient(ctx, scopes...)
if err != nil {
return nil, fmt.Errorf("failed to create client from environment: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
if err != nil {
@@ -1210,6 +1239,7 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
WriteMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
FilterAware: true,
}).Fill(ctx, f)
// Create a new authorized Drive client.
@@ -1489,6 +1519,9 @@ func (f *Fs) newObjectWithExportInfo(
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if strings.HasSuffix(remote, "/") {
return nil, fs.ErrorIsDir
}
info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
if err != nil {
return nil, err
@@ -2876,6 +2909,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
if f.rootFolderID == "appDataFolder" {
changesCall.Spaces("appDataFolder")
}
changesCall.RestrictToMyDrive(!f.opt.SharedWithMe)
changeList, err = changesCall.Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
@@ -3305,7 +3339,7 @@ drives found and a combined drive.
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal charactes will be
be accessible with the aliases shown. Any illegal characters will be
substituted with "_" and duplicate names will have numbers suffixed.
It will also add a remote called AllDrives which shows all the shared
drives combined into one directory tree.
@@ -3322,9 +3356,9 @@ This takes an optional directory to trash which make this easier to
use via the API.
rclone backend untrash drive:directory
rclone backend -i untrash drive:directory subdir
rclone backend --interactive untrash drive:directory subdir
Use the -i flag to see what would be restored before restoring it.
Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.
Result:
@@ -3354,7 +3388,7 @@ component will be used as the file name.
If the destination is a drive backend then server-side copying will be
attempted if possible.
Use the -i flag to see what would be copied before copying.
Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
`,
}, {
Name: "exportformats",
@@ -3430,13 +3464,12 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if err != nil {
return nil, err
}
re := regexp.MustCompile(`[^\w_. -]+`)
if _, ok := opt["config"]; ok {
lines := []string{}
upstreams := []string{}
names := make(map[string]struct{}, len(drives))
for i, drive := range drives {
name := re.ReplaceAllString(drive.Name, "_")
name := fspath.MakeConfigName(drive.Name)
for {
if _, found := names[name]; !found {
break
@@ -3799,7 +3832,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
data = data[:limit]
}
return ioutil.NopCloser(bytes.NewReader(data)), nil
return io.NopCloser(bytes.NewReader(data)), nil
}
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
@@ -3858,7 +3891,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info)
if err != nil {
return err
}
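The new env_auth branch above falls back to Google Application Default Credentials when no service account file or blob is configured. A minimal standalone sketch of that lookup using golang.org/x/oauth2/google, with the full-drive scope written out literally (an illustration, not the backend's actual wiring):

    package example

    import (
    	"context"
    	"fmt"
    	"net/http"

    	"golang.org/x/oauth2/google"
    )

    // defaultDriveClient builds an HTTP client from Application Default
    // Credentials: GOOGLE_APPLICATION_CREDENTIALS, gcloud user credentials,
    // or the instance metadata server when running on GCP.
    func defaultDriveClient(ctx context.Context) (*http.Client, error) {
    	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/drive")
    	if err != nil {
    		return nil, fmt.Errorf("failed to create client from environment: %w", err)
    	}
    	return client, nil
    }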

View File

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"os"
"path"
@@ -78,7 +77,7 @@ var additionalMimeTypes = map[string]string{
// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {})
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
buf, err := os.ReadFile(filepath.FromSlash("test/about.json"))
var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"`
@@ -244,6 +243,15 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, quotaExceededRetry)
assert.Equal(t, quotaExceededError, expectedQuotaError)
sqEItem := googleapi.ErrorItem{
Reason: "storageQuotaExceeded",
}
generic403.Errors[0] = sqEItem
expectedStorageQuotaError := fserrors.FatalError(&generic403)
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, storageQuotaExceededRetry)
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
}
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
@@ -518,6 +526,9 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
opt := &filter.Opt{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)

View File

@@ -13,7 +13,6 @@ import (
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
@@ -140,55 +139,12 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
return complete, nil
}
// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && signalled {
if err != nil && !signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
@@ -309,7 +265,7 @@ func (b *batcher) Shutdown() {
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Commiting uploads - please wait...")
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
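The one-character fix above (signalled changed to !signalled) means a failed batch commit is now reported to callers that are still waiting synchronously, rather than only in the async case where nobody is listening. A minimal sketch of the broadcast-on-error shape, with illustrative names rather than the backend's types:

    package example

    // response is a stand-in for a per-caller result.
    type response struct {
    	err error
    }

    // commit runs fn and, if it fails, sends the error to every caller still
    // waiting on its result channel. alreadySignalled is true in async mode,
    // where callers were answered up front and must not be written to again.
    func commit(fn func() error, waiters []chan<- response, alreadySignalled bool) (err error) {
    	defer func() {
    		if err != nil && !alreadySignalled {
    			for _, w := range waiters {
    				w <- response{err: err}
    			}
    		}
    	}()
    	return fn()
    }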

View File

@@ -58,7 +58,7 @@ import (
const (
rcloneClientID = "5jcck7diasz0rqy"
rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
minSleep = 10 * time.Millisecond
defaultMinSleep = fs.Duration(10 * time.Millisecond)
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
// Upload chunk size - setting too small makes uploads slow.
@@ -182,8 +182,9 @@ client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere.
`,
Default: "",
Advanced: true,
Default: "",
Advanced: true,
Sensitive: true,
}, {
Name: "shared_files",
Help: `Instructs rclone to work on individual shared files.
@@ -260,17 +261,22 @@ uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 500ms
- batch_mode: sync - default batch_timeout is 10s
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish comitting`,
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
Name: "pacer_min_sleep",
Default: defaultMinSleep,
Help: "Minimum time to sleep between API calls.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -299,6 +305,7 @@ type Options struct {
BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -442,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name,
opt: *opt,
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
@@ -536,7 +543,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
default:
return nil, err
}
// if the moint failed we have to abort here
// if the mount failed we have to abort here
}
// if the mount succeeded it's now a normal folder in the users root namespace
// we disable shared folder mode and proceed normally
@@ -719,7 +726,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
}
for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
entries = append(entries, d)
if err != nil {
return nil, err
@@ -906,7 +913,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf)
if folderInfo != nil {
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
@@ -1669,7 +1676,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
delta := int64(correctOffset) - int64(cursor.Offset)
skip += delta
what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
if skip < 0 {
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
} else if skip == chunkSize {

View File

@@ -118,6 +118,9 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
Single: 1,
Pass: f.opt.FilePassword,
}
if f.opt.CDN {
request.CDN = 1
}
opts := rest.Opts{
Method: "POST",
Path: "/download/get_token.cgi",
@@ -405,6 +408,32 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
return response, nil
}
func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
request := &MoveDirRequest{
FolderID: folderID,
DestinationFolderID: destinationFolderID,
Rename: newLeaf,
// DestinationUser: destinationUser,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mv.cgi",
}
response = &MoveDirResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't move dir: %w", err)
}
return response, nil
}
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{
URLs: []string{url},
@@ -473,7 +502,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("didnt got an upload node: %w", err)
return nil, fmt.Errorf("didn't get an upload node: %w", err)
}
// fs.Debugf(f, "Got Upload node")

View File

@@ -1,3 +1,4 @@
// Package fichier provides an interface to the 1Fichier storage system.
package fichier
import (
@@ -37,8 +38,9 @@ func init() {
Description: "1Fichier",
NewFs: NewFs,
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key",
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key",
Sensitive: true,
}, {
Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder",
@@ -53,6 +55,11 @@ func init() {
Name: "folder_password",
Advanced: true,
IsPassword: true,
}, {
Help: "Set if you wish to use CDN download links.",
Name: "cdn",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -88,6 +95,7 @@ type Options struct {
SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"`
CDN bool `config:"cdn"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -332,7 +340,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) {
return nil, errors.New("File too big, cant upload")
return nil, errors.New("File too big, can't upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
@@ -480,6 +488,51 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove.
//
// If destination exists then return fs.ErrorDirExists.
//
// This is complicated by the fact that we can't use moveDir to move
// to a different directory AND rename at the same time as it can
// overwrite files in the source directory.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
if err != nil {
return err
}
srcIDnumeric, err := strconv.Atoi(srcID)
if err != nil {
return err
}
dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
if err != nil {
return err
}
var resp *MoveDirResponse
resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
if err != nil {
return fmt.Errorf("couldn't rename leaf: %w", err)
}
if resp.Status != "OK" {
return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
// Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
@@ -553,6 +606,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)

View File

@@ -20,6 +20,7 @@ type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
Pass string `json:"pass,omitempty"`
CDN int `json:"cdn,omitempty"`
}
// RemoveFolderRequest is the request structure of the corresponding request
@@ -69,6 +70,22 @@ type MoveFileResponse struct {
URLs []string `json:"urls"`
}
// MoveDirRequest is the request structure of the corresponding request
type MoveDirRequest struct {
FolderID int `json:"folder_id"`
DestinationFolderID int `json:"destination_folder_id,omitempty"`
DestinationUser string `json:"destination_user"`
Rename string `json:"rename,omitempty"`
}
// MoveDirResponse is the response structure of the corresponding request
type MoveDirResponse struct {
Status string `json:"status"`
Message string `json:"message"`
OldName string `json:"old_name"`
NewName string `json:"new_name"`
}
// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
URLs []string `json:"urls"`
@@ -84,7 +101,7 @@ type CopyFileResponse struct {
URLs []FileCopy `json:"urls"`
}
// FileCopy is used in the the CopyFileResponse
// FileCopy is used in the CopyFileResponse
type FileCopy struct {
FromURL string `json:"from_url"`
ToURL string `json:"to_url"`

View File

@@ -19,7 +19,7 @@ const (
timeFormatJSON = `"` + timeFormatParameters + `"`
)
// Time represents represents date and time information for the
// Time represents date and time information for the
// filefabric API
type Time time.Time
@@ -95,7 +95,7 @@ type Status struct {
// Warning string `json:"warning"` // obsolete
}
// Status statisfies the error interface
// Status satisfies the error interface
func (e *Status) Error() string {
return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}

View File

@@ -20,7 +20,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -85,6 +84,7 @@ Leave blank normally.
Fill in to make rclone start with directory of a given ID.
`,
Sensitive: true,
}, {
Name: "permanent_token",
Help: `Permanent Authentication Token.
@@ -98,6 +98,7 @@ These tokens are normally valid for several years.
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`,
Sensitive: true,
}, {
Name: "token",
Help: `Session Token.
@@ -107,7 +108,8 @@ usually valid for 1 hour.
Don't set this value - rclone will set it automatically.
`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "token_expiry",
Help: `Token expiry time.
@@ -150,7 +152,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
m configmap.Mapper // to save config
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenMu sync.Mutex // hold when reading the token
@@ -843,7 +845,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// Wait for the the background task to complete if necessary
// Wait for the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
if taskID == "" || taskID == "0" {
// No task to wait for
@@ -1186,7 +1188,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, errors.New("can't download - no id")
}
if o.contentType == emptyMimeType {
return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
return io.NopCloser(bytes.NewReader([]byte{})), nil
}
fs.FixRangeOption(options, o.size)
resp, err := o.fs.rpc(ctx, "getFile", params{

View File

@@ -15,7 +15,7 @@ import (
"sync"
"time"
"github.com/jlaffaye/ftp"
"github.com/rclone/ftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -28,6 +28,7 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/proxy"
"github.com/rclone/rclone/lib/readers"
)
@@ -48,13 +49,15 @@ func init() {
Description: "FTP",
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
Sensitive: true,
}, {
Name: "user",
Help: "FTP username.",
Default: currentUser,
Name: "user",
Help: "FTP username.",
Default: currentUser,
Sensitive: true,
}, {
Name: "port",
Help: "FTP port number.",
@@ -70,7 +73,7 @@ func init() {
When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTP.`,
than port 21. Cannot be used in combination with explicit FTPS.`,
Default: false,
}, {
Name: "explicit_tls",
@@ -78,11 +81,25 @@ than port 21. Cannot be used in combination with explicit FTP.`,
When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
to an encrypted one. Cannot be used in combination with implicit FTPS.`,
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
Name: "concurrency",
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
Note that setting this is very likely to cause deadlocks so it should
be used with care.
If you are doing a sync or copy then make sure concurrency is one more
than the sum of |--transfers| and |--checkers|.
If you use |--check-first| then it just needs to be one more than the
maximum of |--checkers| and |--transfers|.
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.
`, "|", "`", -1),
Default: 0,
Advanced: true,
}, {
@@ -110,6 +127,11 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
Help: "Use MDTM to set modification time (VsFtpd quirk)",
Default: false,
Advanced: true,
}, {
Name: "force_list_hidden",
Help: "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
@@ -153,6 +175,18 @@ Enabled by default. Use 0 to disable.`,
If this is set and no password is supplied then rclone will ask for a password
`,
Advanced: true,
}, {
Name: "socks_proxy",
Default: "",
Help: `Socks 5 proxy host.
Supports the format user:pass@host:port, user@host:port, host:port.
Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -191,11 +225,13 @@ type Options struct {
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
}
// Fs represents a remote FTP server
@@ -295,18 +331,33 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}
// Return a *textproto.Error if err contains one or nil otherwise
func textprotoError(err error) (errX *textproto.Error) {
if errors.As(err, &errX) {
return errX
}
return nil
}
// returns true if this FTP error should be retried
func isRetriableFtpError(err error) bool {
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
return true
}
}
return false
}
// shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
if isRetriableFtpError(err) {
return true, err
}
return fserrors.ShouldRetry(err), err
}
@@ -316,14 +367,49 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")
// Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) {
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if f.tlsConf != nil && err == nil {
conn = tls.Client(conn, f.tlsConf)
fs.Debugf(f, "dial(%q,%q)", network, address)
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
baseDialer := fshttp.NewDialer(ctx)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
} else {
conn, err = baseDialer.Dial(network, address)
}
return
if err != nil {
return nil, err
}
// Connect using cleartext only for non TLS
if f.tlsConf == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
if f.opt.ExplicitTLS && initialConnection {
initialConnection = false
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, f.tlsConf)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
// See: https://github.com/jlaffaye/ftp/issues/282
if false {
err = tlsConn.HandshakeContext(ctx)
if err != nil {
_ = conn.Close()
return nil, err
}
}
return tlsConn, nil
}
ftpConfig := []ftp.DialOption{
ftp.DialWithContext(ctx),
ftp.DialWithDialFunc(dial),
}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
@@ -331,12 +417,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -353,6 +433,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
}
if f.opt.ForceListHidden {
ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
@@ -416,8 +499,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
var tpErr *textproto.Error
if !errors.As(err, &tpErr) {
if tpErr := textprotoError(err); tpErr != nil {
nopErr := c.NoOp()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -519,6 +601,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
PartialUploads: true,
}).Fill(ctx, f)
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
@@ -566,8 +649,7 @@ func (f *Fs) Shutdown(ctx context.Context) error {
// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorObjectNotFound
@@ -578,8 +660,7 @@ func translateErrorFile(err error) error {
// translateErrorDir turns FTP errors into rclone errors if possible for a directory
func translateErrorDir(err error) error {
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorDirNotFound
@@ -610,8 +691,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
if remote == "" || remote == "." || remote == "/" {
// if root, assume exists and synthesize an entry
return &ftp.Entry{
Name: "",
@@ -619,13 +699,38 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
Time: time.Now(),
}, nil
}
dir := path.Dir(fullPath)
base := path.Base(fullPath)
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("findItem: %w", err)
}
// returns TRUE if MLST is supported which is required to call GetEntry
if c.IsTimePreciseInList() {
entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
f.putFtpConnection(&c, err)
if err != nil {
err = translateErrorFile(err)
if err == fs.ErrorObjectNotFound {
return nil, nil
}
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusBadArguments:
err = nil
}
}
return nil, err
}
if entry != nil {
f.entryToStandard(entry)
}
return entry, nil
}
dir := path.Dir(remote)
base := path.Base(remote)
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
@@ -644,7 +749,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(ctx, remote)
entry, err := f.findItem(ctx, path.Join(f.root, remote))
if err != nil {
return nil, err
}
@@ -666,7 +771,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
// dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, remote)
entry, err := f.findItem(ctx, path.Join(f.root, remote))
if err != nil {
return false, fmt.Errorf("dirExists: %w", err)
}
@@ -810,32 +915,18 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// getInfo reads the FileInfo for a path
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
dir := path.Dir(remote)
base := path.Base(remote)
c, err := f.getFtpConnection(ctx)
file, err := f.findItem(ctx, remote)
if err != nil {
return nil, fmt.Errorf("getInfo: %w", err)
}
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for i := range files {
file := files[i]
f.entryToStandard(file)
if file.Name == base {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
return nil, err
} else if file != nil {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
}
return nil, fs.ErrorObjectNotFound
}
@@ -866,8 +957,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil
@@ -1036,7 +1126,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if !o.fs.fSetTime {
fs.Errorf(o.fs, "SetModTime is not supported")
fs.Debugf(o.fs, "SetModTime is not supported")
return nil
}
c, err := o.fs.getFtpConnection(ctx)
@@ -1108,8 +1198,7 @@ func (f *ftpReadCloser) Close() error {
// mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
err = nil
@@ -1135,15 +1224,26 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
}
c, err := o.fs.getFtpConnection(ctx)
var (
fd *ftp.Response
c *ftp.ServerConn
)
err = o.fs.pacer.Call(func() (bool, error) {
c, err = o.fs.getFtpConnection(ctx)
if err != nil {
return false, err // getFtpConnection has retries already
}
fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
}
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("open: %w", err)
}
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, fmt.Errorf("open: %w", err)
}
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil
}
@@ -1176,13 +1276,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - send by some servers
if err != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
}
if err != nil {
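The socks_proxy option added above sends both the FTP control and data connections through a SOCKS5 proxy before any TLS negotiation happens. A minimal sketch of that kind of dial function using golang.org/x/net/proxy (a different helper than the lib/proxy package the backend uses; the proxy address and credentials are placeholders):

    package example

    import (
    	"net"

    	"golang.org/x/net/proxy"
    )

    // socksDial connects to address via a SOCKS5 proxy, or dials directly
    // when no proxy address is configured.
    func socksDial(network, address, proxyAddr string, auth *proxy.Auth) (net.Conn, error) {
    	if proxyAddr == "" {
    		return net.Dial(network, address)
    	}
    	dialer, err := proxy.SOCKS5("tcp", proxyAddr, auth, proxy.Direct)
    	if err != nil {
    		return nil, err
    	}
    	return dialer.Dial(network, address)
    }

In the backend the same dial function then layers implicit or explicit TLS on top of the proxied connection, as the diff above shows.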

View File

@@ -34,9 +34,9 @@ func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
// test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) {
const (
fileSize = 100000000 // 100 MiB
idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
fileSize = 100000000 // 100 MiB
idleTimeout = 1 * time.Second // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
)
if testing.Short() {

View File

@@ -19,8 +19,8 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"strconv"
"strings"
@@ -82,7 +82,8 @@ func init() {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
envAuth, _ := m.Get("env_auth")
if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
return nil, nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -90,15 +91,21 @@ func init() {
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
Sensitive: true,
}, {
Name: "user_project",
Help: "User project.\n\nOptional - needed only for requester pays.",
Sensitive: true,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
Sensitive: true,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -297,6 +304,15 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}, {
Name: "directory_markers",
Default: false,
Advanced: true,
Help: `Upload an empty object with a trailing slash when a new directory is created
Empty folders are unsupported for bucket based remotes, this option creates an empty
object ending with "/", to persist the folder.
`,
}, {
Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it.
@@ -311,7 +327,7 @@ rclone does if you know the bucket exists already.
Help: `If set this will decompress gzip encoded objects.
It is possible to upload objects to GCS with "Content-Encoding: gzip"
set. Normally rclone will download these files files as compressed objects.
set. Normally rclone will download these files as compressed objects.
If this flag is set then rclone will decompress these files with
"Content-Encoding: gzip" as they are received. This means that rclone
@@ -319,6 +335,10 @@ can't check the size and hash but the file contents will be decompressed.
`,
Advanced: true,
Default: false,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -326,6 +346,17 @@ can't check the size and hash but the file contents will be decompressed.
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}, {
Name: "env_auth",
Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...),
})
}
@@ -333,6 +364,7 @@ can't check the size and hash but the file contents will be decompressed.
// Options defines the configuration for this backend
type Options struct {
ProjectNumber string `config:"project_number"`
UserProject string `config:"user_project"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
Anonymous bool `config:"anonymous"`
@@ -343,7 +375,10 @@ type Options struct {
StorageClass string `config:"storage_class"`
NoCheckBucket bool `config:"no_check_bucket"`
Decompress bool `config:"decompress"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
DirectoryMarkers bool `config:"directory_markers"`
}
// Fs represents a remote storage server
@@ -439,7 +474,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
@@ -482,7 +517,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
@@ -495,6 +530,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
}
} else if opt.EnvAuth {
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
if err != nil {
@@ -520,10 +560,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(ctx, f)
if opt.DirectoryMarkers {
f.features.CanHaveEmptyDirectories = true
}
// Create a new authorized Drive client.
f.client = oAuthClient
f.svc, err = storage.NewService(context.Background(), option.WithHTTPClient(f.client))
gcsOpts := []option.ClientOption{option.WithHTTPClient(f.client)}
if opt.Endpoint != "" {
gcsOpts = append(gcsOpts, option.WithEndpoint(opt.Endpoint))
}
f.svc, err = storage.NewService(context.Background(), gcsOpts...)
if err != nil {
return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
}
@@ -532,7 +579,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Check to see if the object exists
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
_, err = get.Do()
return shouldRetry(ctx, err)
})
if err == nil {
@@ -592,9 +643,13 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
directory += "/"
}
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
if !recurse {
list = list.Delimiter("/")
}
foundItems := 0
for {
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
@@ -610,6 +665,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
return err
}
if !recurse {
foundItems += len(objects.Prefixes)
var object storage.Object
for _, remote := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") {
@@ -630,22 +686,29 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
}
}
foundItems += len(objects.Items)
for _, object := range objects.Items {
remote := f.opt.Enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
// is this a directory marker?
if isDirectory {
// Don't insert the root directory
if remote == directory {
continue
}
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
}
remote = remote[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
}
// is this a directory marker?
if isDirectory {
continue // skip directory marker
}
err = fn(remote, object, false)
err = fn(remote, object, isDirectory)
if err != nil {
return err
}
@@ -655,6 +718,17 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
list.PageToken(objects.NextPageToken)
}
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
// Determine whether the directory exists or not by whether it has a marker
_, err := f.readObjectInfo(ctx, bucket, directory)
if err != nil {
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
return err
}
}
return nil
}
@@ -698,6 +772,9 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
return nil, errors.New("can't list buckets without project number")
}
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
if f.opt.UserProject != "" {
listBuckets = listBuckets.UserProject(f.opt.UserProject)
}
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
@@ -815,10 +892,69 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return f.Put(ctx, in, src, options...)
}
// Create directory marker file and parents
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
if !f.opt.DirectoryMarkers || bucket == "" {
return nil
}
// Object to be uploaded
o := &Object{
fs: f,
modTime: time.Now(),
}
for {
_, bucketPath := f.split(dir)
// Don't create the directory marker if it is the bucket or at the very root
if bucketPath == "" {
break
}
o.remote = dir + "/"
// Check to see if object already exists
_, err := o.readObjectInfo(ctx)
if err == nil {
return nil
}
// Upload it if not
fs.Debugf(o, "Creating directory marker")
content := io.Reader(strings.NewReader(""))
err = o.Update(ctx, content, o)
if err != nil {
return fmt.Errorf("creating directory marker failed: %w", err)
}
// Now check parent directory exists
dir = path.Dir(dir)
if dir == "/" || dir == "." {
break
}
}
return nil
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
bucket, _ := f.split(dir)
return f.makeBucket(ctx, bucket)
e := f.checkBucket(ctx, bucket)
if e != nil {
return e
}
return f.createDirectoryMarker(ctx, bucket, dir)
}
// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
return f.Mkdir(ctx, dir)
}
// makeBucket creates the bucket if it doesn't exist
@@ -827,7 +963,11 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
_, err = list.Do()
return shouldRetry(ctx, err)
})
if err == nil {
@@ -862,7 +1002,11 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Context(ctx).Do()
insertBucket = insertBucket.Context(ctx)
if f.opt.UserProject != "" {
insertBucket = insertBucket.UserProject(f.opt.UserProject)
}
_, err = insertBucket.Do()
return shouldRetry(ctx, err)
})
}, nil)
@@ -882,12 +1026,28 @@ func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
bucket, directory := f.split(dir)
// Remove directory marker file
if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
o := &Object{
fs: f,
remote: dir + "/",
}
fs.Debugf(o, "Removing directory marker")
err := o.Remove(ctx)
if err != nil {
return fmt.Errorf("removing directory marker failed: %w", err)
}
}
if bucket == "" || directory != "" {
return nil
}
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx)
if f.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
}
err = deleteBucket.Do()
return shouldRetry(ctx, err)
})
})
@@ -909,7 +1069,7 @@ func (f *Fs) Precision() time.Duration {
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.checkBucket(ctx, dstBucket)
err := f.mkdirParent(ctx, remote)
if err != nil {
return nil, err
}
@@ -933,7 +1093,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var rewriteResponse *storage.RewriteResponse
for {
err = f.pacer.Call(func() (bool, error) {
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
rewriteRequest = rewriteRequest.Context(ctx)
if f.opt.UserProject != "" {
rewriteRequest.UserProject(f.opt.UserProject)
}
rewriteResponse, err = rewriteRequest.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1043,8 +1207,17 @@ func (o *Object) setMetaData(info *storage.Object) {
// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
return o.fs.readObjectInfo(ctx, bucket, bucketPath)
}
// readObjectInfo reads the definition for an object
func (f *Fs) readObjectInfo(ctx context.Context, bucket, bucketPath string) (object *storage.Object, err error) {
err = f.pacer.Call(func() (bool, error) {
get := f.svc.Objects.Get(bucket, bucketPath).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
object, err = get.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1116,7 +1289,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
copyObject = copyObject.Context(ctx)
if o.fs.opt.UserProject != "" {
copyObject = copyObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = copyObject.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1133,6 +1310,9 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.UserProject != "" {
o.url = o.url + "&userProject=" + o.fs.opt.UserProject
}
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
if err != nil {
return nil, err
@@ -1176,11 +1356,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split()
err := o.fs.checkBucket(ctx, bucket)
if err != nil {
return err
// Create parent dir/bucket if not saving directory marker
if !strings.HasSuffix(o.remote, "/") {
err = o.fs.mkdirParent(ctx, o.remote)
if err != nil {
return err
}
}
modTime := src.ModTime(ctx)
@@ -1225,7 +1408,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Context(ctx).Do()
insertObject = insertObject.Context(ctx)
if o.fs.opt.UserProject != "" {
insertObject = insertObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = insertObject.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1240,7 +1427,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
deleteBucket := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx)
if o.fs.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(o.fs.opt.UserProject)
}
err = deleteBucket.Do()
return shouldRetry(ctx, err)
})
return err

View File

@@ -6,6 +6,7 @@ import (
"testing"
"github.com/rclone/rclone/backend/googlecloudstorage"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
@@ -16,3 +17,17 @@ func TestIntegration(t *testing.T) {
NilObject: (*googlecloudstorage.Object)(nil),
})
}
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestGoogleCloudStorage"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*googlecloudstorage.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}

View File

@@ -1,3 +1,4 @@
// Package api provides types used by the Google Photos API.
package api
import (

View File

@@ -178,7 +178,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
unAuth *rest.Client // unauthenticated http client
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
ts *oauthutil.TokenSource // token source for oauth2
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps

View File

@@ -3,7 +3,7 @@ package googlephotos
import (
"context"
"fmt"
"io/ioutil"
"io"
"net/http"
"path"
"testing"
@@ -12,7 +12,6 @@ import (
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
@@ -56,7 +55,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
@@ -99,7 +98,7 @@ func TestIntegration(t *testing.T) {
t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx)
require.NoError(t, err)
buf, err := ioutil.ReadAll(in)
buf, err := io.ReadAll(in)
require.NoError(t, err)
require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000)
@@ -221,7 +220,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()

View File

@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
}
accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
doneCount++
}
})

View File

@@ -166,6 +166,7 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)

View File

@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"path"
"time"
@@ -118,7 +117,7 @@ func (o *Object) updateHashes(ctx context.Context) error {
defer func() {
_ = r.Close()
}()
if _, err = io.Copy(ioutil.Discard, r); err != nil {
if _, err = io.Copy(io.Discard, r); err != nil {
fs.Infof(o, "update failed (copy): %v", err)
return err
}

View File

@@ -1,6 +1,7 @@
//go:build !plan9
// +build !plan9
// Package hdfs provides an interface to the HDFS storage system.
package hdfs
import (
@@ -18,9 +19,10 @@ func init() {
Description: "Hadoop distributed file system",
NewFs: NewFs,
Options: []fs.Option{{
Name: "namenode",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
Name: "namenode",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
Sensitive: true,
}, {
Name: "username",
Help: "Hadoop user name.",
@@ -28,6 +30,7 @@ func init() {
Value: "root",
Help: "Connect to hdfs as root.",
}},
Sensitive: true,
}, {
Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode.
@@ -35,15 +38,16 @@ func init() {
Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy.
Specifies whether or not authentication, data signature integrity
checks, and wire encryption is required when communicating the the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
checks, and wire encryption are required when communicating with
the datanodes. Possible values are 'authentication', 'integrity'
and 'privacy'. Used only with KERBEROS enabled.`,
Examples: []fs.OptionExample{{
Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.",

View File

@@ -294,15 +294,6 @@ func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType Cop
return &result, nil
}
// copyDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
}
// moveDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//

View File

@@ -2,7 +2,7 @@
package hidrive
// FIXME HiDrive only supports file or folder names of 255 characters or less.
// Operations that create files oder folder with longer names will throw a HTTP error:
// Operations that create files or folders with longer names will throw an HTTP error:
// - 422 Unprocessable Entity
// A more graceful way for rclone to handle this may be desirable.
@@ -330,7 +330,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, transaction)
}
// Do not allow the root-prefix to be non-existent nor a directory,
// Do not allow the root-prefix to be nonexistent nor a directory,
// but it can be empty.
if f.opt.RootPrefix != "" {
item, err := f.fetchMetadataForPath(ctx, f.opt.RootPrefix, api.HiDriveObjectNoMetadataFields)
@@ -338,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("could not access root-prefix: %w", err)
}
if item.Type != api.HiDriveObjectTypeDirectory {
return nil, errors.New("The root-prefix needs to point to a valid directory or be empty")
return nil, errors.New("the root-prefix needs to point to a valid directory or be empty")
}
}
@@ -623,7 +623,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// should be retried after the parent-directories of the destination have been created.
// If so, it will create the parent-directories.
//
// If any errors arrise while finding the source or
// If any errors arise while finding the source or
// creating the parent-directory those will be returned.
// Otherwise returns the originalError.
func (f *Fs) shouldRetryAndCreateParents(ctx context.Context, destinationPath string, sourcePath string, originalError error) (bool, error) {
@@ -961,7 +961,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} else {
_, _, err = o.fs.uploadFileChunked(ctx, resolvedPath, in, modTime, int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency)
}
// Try to check if object was updated, eitherway.
// Try to check if object was updated, either way.
// Metadata should be updated even if the upload fails.
info, metaErr = o.fs.fetchMetadataForPath(ctx, resolvedPath, api.HiDriveObjectWithMetadataFields)
} else {

View File

@@ -138,7 +138,7 @@ var testTable = []struct {
// pattern describes how to use data to construct the hash-input.
// For every entry n at even indices this repeats the data n times.
// For every entry m at odd indices this repeats a null-byte m times.
// The input-data is constructed by concatinating the results in order.
// The input-data is constructed by concatenating the results in order.
pattern []int64
out []byte
name string

View File

@@ -1,3 +1,4 @@
// Package internal provides utilities for HiDrive.
package internal
import (

View File

@@ -13,7 +13,6 @@ import (
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"time"
@@ -305,7 +304,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f,
remote: remote,
}
err := o.stat(ctx)
err := o.head(ctx)
if err != nil {
return nil, err
}
@@ -317,15 +316,6 @@ func (f *Fs) url(remote string) string {
return f.endpointURL + rest.URLPathEscape(remote)
}
// parse s into an int64, on failure return def
func parseInt64(s string, def int64) int64 {
n, e := strconv.ParseInt(s, 10, 64)
if e != nil {
return def
}
return n
}
// Errors returned by parseName
var (
errURLJoinFailed = errors.New("URLJoin failed")
@@ -500,12 +490,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs: f,
remote: remote,
}
switch err := file.stat(ctx); err {
switch err := file.head(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
// ...found a directory not a file
add(fs.NewDir(remote, timeUnset))
add(fs.NewDir(remote, time.Time{}))
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
@@ -517,7 +507,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
add(fs.NewDir(remote, timeUnset))
add(fs.NewDir(remote, time.Time{}))
} else {
in <- remote
}
@@ -579,8 +569,8 @@ func (o *Object) url() string {
return o.fs.url(o.remote)
}
// stat updates the info field in the Object
func (o *Object) stat(ctx context.Context) error {
// head sends a HEAD request to update info fields in the Object
func (o *Object) head(ctx context.Context) error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
@@ -601,13 +591,19 @@ func (o *Object) stat(ctx context.Context) error {
if err != nil {
return fmt.Errorf("failed to stat: %w", err)
}
return o.decodeMetadata(ctx, res)
}
// decodeMetadata updates info fields in the Object according to HTTP response headers
func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
t = timeUnset
}
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
o.size = rest.ParseSizeFromHeaders(res.Header)
// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
@@ -653,6 +649,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
if err = o.decodeMetadata(ctx, res); err != nil {
return nil, fmt.Errorf("decodeMetadata failed: %w", err)
}
return res.Body, nil
}
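The refactor above renames stat() to head() and splits out decodeMetadata(), so the same header parsing can run on a GET response when --no-head skipped the initial HEAD. A small sketch of deriving object metadata from response headers; metadataFromHeaders is an assumed helper for illustration, not rclone's rest.ParseSizeFromHeaders.

    // Sketch: derive size, modtime and content type from HTTP response headers.
    package main

    import (
        "fmt"
        "net/http"
        "strconv"
        "strings"
        "time"
    )

    func metadataFromHeaders(h http.Header) (size int64, modTime time.Time, contentType string) {
        size = -1
        if cl := h.Get("Content-Length"); cl != "" {
            if n, err := strconv.ParseInt(cl, 10, 64); err == nil {
                size = n
            }
        }
        // A ranged (206) response carries the total size in "Content-Range: bytes 1-5/10".
        if cr := h.Get("Content-Range"); cr != "" {
            if i := strings.LastIndex(cr, "/"); i >= 0 {
                if n, err := strconv.ParseInt(cr[i+1:], 10, 64); err == nil {
                    size = n
                }
            }
        }
        t, err := http.ParseTime(h.Get("Last-Modified"))
        if err != nil {
            t = time.Unix(0, 0) // unknown
        }
        return size, t, h.Get("Content-Type")
    }

    func main() {
        res, err := http.Get("https://example.com/")
        if err != nil {
            return
        }
        defer res.Body.Close()
        fmt.Println(metadataFromHeaders(res.Header))
    }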

View File

@@ -3,7 +3,7 @@ package http
import (
"context"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"net/url"
@@ -33,20 +33,21 @@ var (
lineEndSize = 1
)
// prepareServer the test server and return a function to tidy it up afterwards
func prepareServer(t *testing.T) (configmap.Simple, func()) {
// prepareServer prepares the test server and shuts it down automatically
// when the test completes.
func prepareServer(t *testing.T) configmap.Simple {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// verify the file path is correct, and also check which line endings
// are used to get sizes right ("\n" except on Windows, but even there
// we may have "\n" or "\r\n" depending on git crlf setting)
fileList, err := ioutil.ReadDir(filesPath)
fileList, err := os.ReadDir(filesPath)
require.NoError(t, err)
require.Greater(t, len(fileList), 0)
for _, file := range fileList {
if !file.IsDir() {
data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
data, _ := os.ReadFile(filepath.Join(filesPath, file.Name()))
if strings.HasSuffix(string(data), "\r\n") {
lineEndSize = 2
}
@@ -78,20 +79,21 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
"url": ts.URL,
"headers": strings.Join(headers, ","),
}
t.Cleanup(ts.Close)
// return a function to tidy up
return m, ts.Close
return m
}
// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
m, tidy := prepareServer(t)
// prepare prepares the test server and shuts it down automatically
// when the test completes.
func prepare(t *testing.T) fs.Fs {
m := prepareServer(t)
// Instantiate it
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
return f, tidy
return f
}
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
@@ -134,22 +136,19 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
}
func TestListRoot(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
f := prepare(t)
testListRoot(t, f, false)
}
func TestListRootNoSlash(t *testing.T) {
f, tidy := prepare(t)
f := prepare(t)
f.(*Fs).opt.NoSlash = true
defer tidy()
testListRoot(t, f, true)
}
func TestListSubDir(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
f := prepare(t)
entries, err := f.List(context.Background(), "three")
require.NoError(t, err)
@@ -166,8 +165,7 @@ func TestListSubDir(t *testing.T) {
}
func TestNewObject(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
f := prepare(t)
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
@@ -194,36 +192,69 @@ func TestNewObject(t *testing.T) {
}
func TestOpen(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
m := prepareServer(t)
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
for _, head := range []bool{false, true} {
if !head {
m.Set("no_head", "true")
}
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
// Test normal read
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
for _, rangeRead := range []bool{false, true} {
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
if !head {
// Test mod time is still indeterminate
tObj := o.ModTime(context.Background())
assert.Equal(t, time.Duration(0), time.Unix(0, 0).Sub(tObj))
// Test file size is still indeterminate
assert.Equal(t, int64(-1), o.Size())
}
var data []byte
if !rangeRead {
// Test normal read
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}
} else {
// Test with range request
fd, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
tFile := fi.ModTime()
// Test the time is always correct on the object after file open
tObj := o.ModTime(context.Background())
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
if !rangeRead {
// Test the file size
assert.Equal(t, int64(len(data)), o.Size())
}
}
}
// Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
func TestMimeType(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
f := prepare(t)
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
@@ -234,8 +265,7 @@ func TestMimeType(t *testing.T) {
}
func TestIsAFileRoot(t *testing.T) {
m, tidy := prepareServer(t)
defer tidy()
m := prepareServer(t)
f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
@@ -244,8 +274,7 @@ func TestIsAFileRoot(t *testing.T) {
}
func TestIsAFileSubDir(t *testing.T) {
m, tidy := prepareServer(t)
defer tidy()
m := prepareServer(t)
f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
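The test refactor in this file replaces returned tidy functions with t.Cleanup, so the test server is shut down automatically when the test and its subtests finish. A minimal sketch of that pattern with made-up names, not the rclone test code itself:

    // Sketch: register teardown with t.Cleanup instead of returning a tidy func.
    package demo

    import (
        "net/http"
        "net/http/httptest"
        "testing"
    )

    func prepareServer(t *testing.T) string {
        ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            _, _ = w.Write([]byte("beetroot\n"))
        }))
        t.Cleanup(ts.Close) // runs when the test (and its subtests) complete
        return ts.URL
    }

    func TestServer(t *testing.T) {
        url := prepareServer(t)
        res, err := http.Get(url)
        if err != nil {
            t.Fatal(err)
        }
        defer res.Body.Close()
        if res.StatusCode != http.StatusOK {
            t.Fatalf("got status %d", res.StatusCode)
        }
    }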

View File

@@ -1,62 +0,0 @@
package hubic
import (
"context"
"net/http"
"time"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs"
)
// auth is an authenticator for swift
type auth struct {
f *Fs
}
// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
return &auth{
f: f,
}
}
// Request constructs an http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
}
return nil, err
}
// Response parses the result of an http request
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
return nil
}
// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
return a.f.credentials.Endpoint
}
// The access token
func (a *auth) Token() string {
return a.f.credentials.Token
}
// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
return ""
}
// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)

View File

@@ -1,200 +0,0 @@
// Package hubic provides an interface to the Hubic object storage
// system.
package hubic
// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisited after some actual experience.
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
swiftLib "github.com/ncw/swift/v2"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
)
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"credentials.r", // Read OpenStack credentials
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.hubic.com/oauth/auth/",
TokenURL: "https://api.hubic.com/oauth/token/",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})
}
// credentials is the JSON returned from the Hubic API to read the
// OpenStack credentials
type credentials struct {
Token string `json:"token"` // OpenStack token
Endpoint string `json:"endpoint"` // OpenStack endpoint
Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
}
// Fs represents a remote hubic
type Fs struct {
fs.Fs // wrapped Fs
features *fs.Features // optional features
client *http.Client // client for oauth api
credentials credentials // returned from the Hubic API
expires time.Time // time credentials expire
}
// Object describes a swift object
type Object struct {
*swift.Object
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Object.String()
}
// ------------------------------------------------------------
// String converts this Fs to a string
func (f *Fs) String() string {
if f.Fs == nil {
return "Hubic"
}
return fmt.Sprintf("Hubic %s", f.Fs.String())
}
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
resp, err := f.client.Do(req)
if err != nil {
return err
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
}
decoder := json.NewDecoder(resp.Body)
var result credentials
err = decoder.Decode(&result)
if err != nil {
return err
}
// fs.Debugf(f, "Got credentials %+v", result)
if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
return errors.New("couldn't read token, result and expired from credentials")
}
f.credentials = result
expires, err := time.Parse(time.RFC3339, result.Expires)
if err != nil {
return err
}
f.expires = expires
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, time.Until(f.expires))
return nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, fmt.Errorf("failed to configure Hubic: %w", err)
}
f := &Fs{
client: client,
}
// Make the swift Connection
ci := fs.GetConfig(ctx)
c := &swiftLib.Connection{
Auth: newAuth(f),
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
}
err = c.Authenticate(ctx)
if err != nil {
return nil, fmt.Errorf("error authenticating swift connection: %w", err)
}
// Parse config into swift.Options struct
opt := new(swift.Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.Fs = swiftFs
f.features = f.Fs.Features().Wrap(f)
return f, err
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
)

View File

@@ -1,19 +0,0 @@
// Test Hubic filesystem interface
package hubic_test
import (
"testing"
"github.com/rclone/rclone/backend/hubic"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
SkipFsCheckWrap: true,
SkipObjectCheckWrap: true,
})
}

View File

@@ -133,11 +133,13 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
},
Options: []fs.Option{{
Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
Sensitive: true,
}, {
Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
Sensitive: true,
}, {
// their official client (https://github.com/jjjake/internetarchive) hardcodes following the two
Name: "endpoint",
@@ -227,7 +229,7 @@ type Object struct {
rawData json.RawMessage
}
// IAFile reprensents a subset of object in MetadataResponse.Files
// IAFile represents a subset of object in MetadataResponse.Files
type IAFile struct {
Name string `json:"name"`
// Source string `json:"source"`
@@ -243,7 +245,7 @@ type IAFile struct {
rawData json.RawMessage
}
// MetadataResponse reprensents subset of the JSON object returned by (frontend)/metadata/
// MetadataResponse represents subset of the JSON object returned by (frontend)/metadata/
type MetadataResponse struct {
Files []IAFile `json:"files"`
ItemSize int64 `json:"item_size"`
@@ -1273,7 +1275,7 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
}
// mimicks urllib.parse.quote() on Python; exclude / from url.PathEscape
// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
func quotePath(s string) string {
seg := strings.Split(s, "/")
newValues := []string{}

View File

@@ -1,3 +1,4 @@
// Package api provides types used by the Jottacloud API.
package api
import (

View File

@@ -1,3 +1,4 @@
// Package jottacloud provides an interface to the Jottacloud storage system.
package jottacloud
import (
@@ -11,7 +12,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
@@ -74,6 +74,10 @@ const (
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
tele2CloudClientID = "desktop"
onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
onlimeCloudAuthURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
onlimeCloudClientID = "desktop"
)
// Register with Fs
@@ -84,7 +88,7 @@ func init() {
Description: "Jottacloud",
NewFs: NewFs,
Config: Config,
Options: []fs.Option{{
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -119,7 +123,7 @@ func init() {
Default: (encoder.Display |
encoder.EncodeWin | // :?"*<>|
encoder.EncodeInvalidUtf8),
}},
}}...),
})
}
@@ -139,6 +143,9 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
}, {
Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
}, {
Value: "onlime",
Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
}})
case "auth_type_done":
// Jump to next state according to config chosen
@@ -261,6 +268,21 @@ machines.`)
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "onlime": // onlime cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, onlimeCloudClientID)
m.Set(configTokenURL, onlimeCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
},
ClientID: onlimeCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
Choosing no, the default, will let you access the storage used for the archive
@@ -821,7 +843,7 @@ func (f *Fs) allocatePathRaw(file string, absolute bool) string {
func grantTypeFilter(req *http.Request) {
if legacyTokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
refreshBody, err := io.ReadAll(req.Body)
if err != nil {
return
}
@@ -831,7 +853,7 @@ func grantTypeFilter(req *http.Request) {
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close())
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
req.Body = io.NopCloser(bytes.NewReader(refreshBody))
}
}
@@ -1417,7 +1439,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
// if destination was a trashed file then after a successfull copy the copied file is still in trash (bug in api?)
// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
@@ -1788,7 +1810,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
var tempFile *os.File
// create the cache file
tempFile, err = ioutil.TempFile("", cachePrefix)
tempFile, err = os.CreateTemp("", cachePrefix)
if err != nil {
return
}
@@ -1816,7 +1838,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
} else {
// that's a small file, just read it into memory
var inData []byte
inData, err = ioutil.ReadAll(teeReader)
inData, err = io.ReadAll(teeReader)
if err != nil {
return
}
@@ -1838,12 +1860,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil {
// if the object exists delete it
err = o.remove(ctx, true)
if err != nil {
if err != nil && err != fs.ErrorObjectNotFound {
// if delete failed then report that, unless it was because the file did not exist after all
return fmt.Errorf("failed to remove old object: %w", err)
}
}
// if the object does not exist we can just continue but if the error is something different we should report that
if err != fs.ErrorObjectNotFound {
} else if err != fs.ErrorObjectNotFound {
// if the object does not exist we can just continue but if the error is something different we should report that
return err
}
}
@@ -1913,7 +1935,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// copy the already uploaded bytes into the trash :)
var result api.UploadResponse
_, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
_, err = io.CopyN(io.Discard, in, response.ResumePos)
if err != nil {
return err
}
@@ -1930,7 +1952,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0)
} else {
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still ned to update our metadata
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
return o.readMetaData(ctx, true)
}
@@ -1951,10 +1973,17 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
opts.Parameters.Set("dl", "true")
}
return o.fs.pacer.Call(func() (bool, error) {
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
return shouldRetry(ctx, resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
// attempting to hard delete will fail if path does not exist, but standard delete will succeed
if apiErr.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
// Remove an object

View File

@@ -1,3 +1,4 @@
// Package koofr provides an interface to the Koofr storage system.
package koofr
import (
@@ -60,9 +61,10 @@ func init() {
Default: true,
Advanced: true,
}, {
Name: "user",
Help: "Your user name.",
Required: true,
Name: "user",
Help: "Your user name.",
Required: true,
Sensitive: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
@@ -375,7 +377,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for i, file := range files {
remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Unix(0, 0))
entries[i] = fs.NewDir(remote, time.Time{})
} else {
entries[i] = &Object{
fs: f,
@@ -667,7 +669,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
//
// https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
//
// I am not sure about meaning of "path" parameter; in my expriments
// I am not sure about meaning of "path" parameter; in my experiments
// it is always "%2F", and omitting it or putting any other value
// results in 404.
//

View File

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -22,6 +21,7 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
@@ -123,8 +123,8 @@ routine so this flag shouldn't normally be used.`,
Help: `Don't check to see if the files change during upload.
Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.
are being uploaded and aborts with a message which starts "can't copy -
source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (e.g.
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
@@ -234,15 +234,16 @@ type Options struct {
// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
xattrSupported int32 // whether xattrs are supported (atomic access)
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
@@ -265,7 +266,10 @@ type Object struct {
// ------------------------------------------------------------
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
var (
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
)
// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -286,6 +290,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
dev: devUnset,
lstat: os.Lstat,
}
if xattrSupported {
f.xattrSupported = 1
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
@@ -295,6 +302,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
PartialUploads: true,
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat
@@ -305,7 +314,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err == nil {
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
// Check to see if this is a .rclonelink if not found
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
}
if err == nil && f.isRegular(fi.Mode()) {
// Handle the odd case, that a symlink was specified by name without the link suffix
if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
return nil, errLinksNeedsSuffix
}
// It is a file, so use the parent as the root
f.root = filepath.Dir(f.root)
// return an error with an fs which points to the parent
@@ -439,6 +457,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
filter, useFilter := filter.GetConfig(ctx), filter.GetUseFilter(ctx)
fsDirPath := f.localPath(dir)
_, err = os.Stat(fsDirPath)
if err != nil {
@@ -489,7 +509,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
if fierr != nil {
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
// Don't report errors on any file names that are excluded
if useFilter {
newRemote := f.cleanRemote(dir, name)
if !filter.IncludeRemote(newRemote) {
continue
}
}
fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
@@ -510,6 +537,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
// Quietly skip errors on excluded files and directories
if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
continue
}
if os.IsNotExist(err) || isCircularSymlinkError(err) {
// Skip bad symlinks and circular symlinks
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
@@ -534,6 +565,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += linkSuffix
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !filter.IncludeRemote(newRemote) {
continue
}
fso, err := f.newObjectWithInfo(newRemote, fi)
if err != nil {
return nil, err
@@ -626,7 +662,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
precision = time.Second
// Create temporary file and test it
fd, err := ioutil.TempFile("", "rclone")
fd, err := os.CreateTemp("", "rclone")
if err != nil {
// If failed return 1s
// fmt.Println("Failed to create temp file", err)
@@ -1053,7 +1089,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
if err != nil {
return nil, err
}
return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
return readers.NewLimitedReadCloser(io.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
}
// Open an object for read
@@ -1380,30 +1416,27 @@ func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
}
func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
} else {
s = filepath.Clean(s)
}
}
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !noUNC {
// Convert to UNC
s = file.UNCPath(s)
}
return s
}
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = enc.FromStandardPath(s)
return s
}
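Earlier in this file's diff, List starts consulting the active filter and the backend advertises FilterAware, so excluded names can be skipped at source. A rough sketch of how a caller opts in, using the same fs/filter calls as the new tests in the next file; the root path here is illustrative and error handling is shortened.

    // Sketch: install a filter in the context and let a FilterAware backend
    // apply it while listing.
    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/rclone/rclone/backend/local"
        "github.com/rclone/rclone/fs/config/configmap"
        "github.com/rclone/rclone/fs/filter"
    )

    func main() {
        ctx := context.Background()

        // Keep "included", drop everything else.
        ctx, fi := filter.AddConfig(ctx)
        if err := fi.AddRule("+ included"); err != nil {
            log.Fatal(err)
        }
        if err := fi.AddRule("- *"); err != nil {
            log.Fatal(err)
        }

        // Tell filter-aware backends they may apply the filter during List.
        ctx = filter.SetUseFilter(ctx, true)

        f, err := local.NewFs(ctx, "local", "/tmp/demo", configmap.Simple{})
        if err != nil {
            log.Fatal(err)
        }
        entries, err := f.List(ctx, "")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(entries)
    }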

View File

@@ -4,18 +4,22 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"io"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
@@ -31,7 +35,6 @@ func TestMain(m *testing.M) {
// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now())
@@ -76,7 +79,6 @@ func TestUpdatingCheck(t *testing.T) {
func TestSymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
f := r.Flocal.(*Fs)
dir := f.root
@@ -145,10 +147,24 @@ func TestSymlink(t *testing.T) {
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
require.Equal(t, fs.ErrorObjectNotFound, err)
// Check that NewFs works with the suffixed version and --links
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
"links": "true",
})
require.Equal(t, fs.ErrorIsFile, err)
require.Equal(t, dir, f2.(*Fs).root)
// Check that NewFs doesn't see the non suffixed version with --links
f2, err = NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"), configmap.Simple{
"links": "true",
})
require.Equal(t, errLinksNeedsSuffix, err)
require.Nil(t, f2)
// Check reading the object
in, err := o.Open(ctx)
require.NoError(t, err)
contents, err := ioutil.ReadAll(in)
contents, err := io.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt", string(contents))
require.NoError(t, in.Close())
@@ -156,7 +172,7 @@ func TestSymlink(t *testing.T) {
// Check reading the object with range
in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
require.NoError(t, err)
contents, err = ioutil.ReadAll(in)
contents, err = io.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt"[2:5+1], string(contents))
require.NoError(t, in.Close())
@@ -175,7 +191,6 @@ func TestSymlinkError(t *testing.T) {
func TestHashOnUpdate(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
@@ -190,7 +205,7 @@ func TestHashOnUpdate(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
// Reupload it with diferent contents but same size and timestamp
// Reupload it with different contents but same size and timestamp
var b = bytes.NewBufferString("CONTENT")
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
err = o.Update(ctx, b, src)
@@ -206,7 +221,6 @@ func TestHashOnUpdate(t *testing.T) {
func TestHashOnDelete(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
@@ -235,7 +249,6 @@ func TestHashOnDelete(t *testing.T) {
func TestMetadata(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "metafile.txt"
when := time.Now()
const dayLength = len("2001-01-01")
@@ -366,3 +379,179 @@ func TestMetadata(t *testing.T) {
})
}
func TestFilter(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
when := time.Now()
r.WriteFile("included", "included file", when)
r.WriteFile("excluded", "excluded file", when)
f := r.Flocal.(*Fs)
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included"))
require.NoError(t, fi.AddRule("- *"))
// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, "[excluded included]", fmt.Sprint(entries))
// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)
// Check listing with use filter flag
entries, err = f.List(ctx, "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, "[included]", fmt.Sprint(entries))
}
func testFilterSymlink(t *testing.T, copyLinks bool) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
r.WriteFile("included.file", "included file", when)
r.WriteFile("included.dir/included.sub.file", "included sub file", when)
require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
if copyLinks {
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
} else {
// Set fs into "-l" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
}
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Reset global error count
accounting.Stats(ctx).ResetErrors()
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included.file"))
require.NoError(t, fi.AddRule("+ included.dir/**"))
if copyLinks {
require.NoError(t, fi.AddRule("+ included.file.link"))
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
} else {
require.NoError(t, fi.AddRule("+ included.file.link.rclonelink"))
require.NoError(t, fi.AddRule("+ included.dir.link.rclonelink"))
}
require.NoError(t, fi.AddRule("- *"))
// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
if copyLinks {
// Check 1 global error, one for each dangling symlink

assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
} else {
// Check 0 global errors as dangling symlink copied properly
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
}
accounting.Stats(ctx).ResetErrors()
sort.Sort(entries)
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[dangling.link.rclonelink included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}
// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)
// Check listing with use filter flag
entries, err = f.List(ctx, "")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}
// Check listing through a symlink still works
entries, err = f.List(ctx, "included.dir")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
}
func TestFilterSymlinkCopyLinks(t *testing.T) {
testFilterSymlink(t, true)
}
func TestFilterSymlinkLinks(t *testing.T) {
testFilterSymlink(t, false)
}
func TestCopySymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file and a symlink to it
r.WriteFile("src/file.txt", "hello world", when)
require.NoError(t, os.Symlink("file.txt", filepath.Join(r.LocalName, "src", "link.txt")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
// Set fs into "-l/--links" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
// Create dst
require.NoError(t, f.Mkdir(ctx, "dst"))
// Do copy from src into dst
src, err := f.NewObject(ctx, "src/link.txt.rclonelink")
require.NoError(t, err)
require.NotNil(t, src)
dst, err := operations.Copy(ctx, f, nil, "dst/link.txt.rclonelink", src)
require.NoError(t, err)
require.NotNil(t, dst)
// Test that we made a symlink and it has the right contents
dstPath := filepath.Join(r.LocalName, "dst", "link.txt")
linkContents, err := os.Readlink(dstPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkContents)
}

View File

@@ -5,19 +5,42 @@ package local
import (
"fmt"
"runtime"
"sync"
"time"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/unix"
)
var (
statxCheckOnce sync.Once
readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
)
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
statxCheckOnce.Do(func() {
// Check statx() is available as it was only introduced in kernel 4.11
// If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
var stat unix.Statx_t
if runtime.GOOS != "android" && unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
readMetadataFromFileFn = readMetadataFromFileStatx
} else {
readMetadataFromFileFn = readMetadataFromFileFstatat
}
})
return readMetadataFromFileFn(o, m)
}
// Read the metadata from the file into metadata where possible
func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks {
flags = 0
}
var stat unix.Statx_t
// statx() was added to Linux in kernel 4.11
err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
unix.STATX_TYPE | // Want stx_mode & S_IFMT
unix.STATX_MODE | // Want stx_mode & ~S_IFMT
@@ -45,3 +68,36 @@ func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
setTime("btime", stat.Btime)
return nil
}
// Read the metadata from the file into metadata where possible
func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks {
flags = 0
}
var stat unix.Stat_t
// fstatat() was added to Linux in kernel 2.6.16
// Go only supports 2.6.32 or later
err = unix.Fstatat(unix.AT_FDCWD, o.path, &stat, flags)
if err != nil {
return err
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev != 0 {
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
}
setTime := func(key string, t unix.Timespec) {
// The types of t.Sec and t.Nsec vary from int32 to int64 on
// different Linux architectures so we need to cast them to
// int64 here and hence need to quiet the linter about
// unnecessary casts.
//
// nolint: unconvert
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
}
setTime("atime", stat.Atim)
setTime("mtime", stat.Mtim)
return nil
}
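The hunk above probes once for statx() and falls back to fstatat() for the rest of the process. The same probe-once-then-dispatch shape in miniature, assuming nothing beyond the standard library; the stat functions and the probe are illustrative stand-ins, not the rclone implementation.

    // Sketch: detect a fast code path once and fall back permanently if the
    // kernel lacks it.
    package main

    import (
        "fmt"
        "sync"
    )

    var (
        probeOnce sync.Once
        statFn    func(path string) (string, error)
    )

    func fastStat(path string) (string, error) { return "statx: " + path, nil }
    func slowStat(path string) (string, error) { return "fstatat: " + path, nil }

    // kernelSupportsFastStat stands in for the ENOSYS probe in the hunk above.
    func kernelSupportsFastStat() bool { return false }

    func stat(path string) (string, error) {
        probeOnce.Do(func() {
            if kernelSupportsFastStat() {
                statFn = fastStat
            } else {
                statFn = slowStat
            }
        })
        return statFn(path)
    }

    func main() {
        fmt.Println(stat("/etc/hosts"))
    }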

View File

@@ -1,7 +1,6 @@
package local
import (
"io/ioutil"
"os"
"sync"
"testing"
@@ -13,7 +12,7 @@ import (
// Check we can remove an open file
func TestRemove(t *testing.T) {
fd, err := ioutil.TempFile("", "rclone-remove-test")
fd, err := os.CreateTemp("", "rclone-remove-test")
require.NoError(t, err)
name := fd.Name()
defer func() {

View File

@@ -9,7 +9,7 @@ import (
const haveSetBTime = false
// setBTime changes the the birth time of the file passed in
// setBTime changes the birth time of the file passed in
func setBTime(name string, btime time.Time) error {
// Does nothing
return nil

View File

@@ -11,7 +11,7 @@ import (
const haveSetBTime = true
// setBTime sets the the birth time of the file passed in
// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
h, err := syscall.Open(name, os.O_RDWR, 0755)
if err != nil {

View File

@@ -6,6 +6,8 @@ package local
import (
"fmt"
"strings"
"sync/atomic"
"syscall"
"github.com/pkg/xattr"
"github.com/rclone/rclone/fs"
@@ -16,12 +18,30 @@ const (
xattrSupported = xattr.XATTR_SUPPORTED
)
// Check to see if the error supplied is a not supported error, and if
// so, disable xattrs
func (f *Fs) xattrIsNotSupported(err error) bool {
xattrErr, ok := err.(*xattr.Error)
if !ok {
return false
}
// Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris)
if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
// Show xattrs not supported
if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
fs.Errorf(f, "xattrs not supported - disabling: %v", err)
}
return true
}
return false
}
// getXattr returns the extended attributes for an object
//
// It doesn't return any attributes owned by this backend in
// metadataKeys
func (o *Object) getXattr() (metadata fs.Metadata, err error) {
if !xattrSupported {
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
return nil, nil
}
var list []string
@@ -31,6 +51,9 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
list, err = xattr.LList(o.path)
}
if err != nil {
if o.fs.xattrIsNotSupported(err) {
return nil, nil
}
return nil, fmt.Errorf("failed to read xattr: %w", err)
}
if len(list) == 0 {
@@ -45,6 +68,9 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
v, err = xattr.LGet(o.path, k)
}
if err != nil {
if o.fs.xattrIsNotSupported(err) {
return nil, nil
}
return nil, fmt.Errorf("failed to read xattr key %q: %w", k, err)
}
k = strings.ToLower(k)
@@ -64,7 +90,7 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
//
// It doesn't set any attributes owned by this backend in metadataKeys
func (o *Object) setXattr(metadata fs.Metadata) (err error) {
if !xattrSupported {
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
return nil
}
for k, value := range metadata {
@@ -80,6 +106,9 @@ func (o *Object) setXattr(metadata fs.Metadata) (err error) {
err = xattr.LSet(o.path, k, v)
}
if err != nil {
if o.fs.xattrIsNotSupported(err) {
return nil
}
return fmt.Errorf("failed to set xattr key %q: %w", k, err)
}
}
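The hunks above detect "xattrs not supported" errors and atomically flip a flag so every later call short-circuits instead of logging the same failure repeatedly. A Unix-only sketch of that pattern using the same github.com/pkg/xattr calls; the wrapper type and path are illustrative.

    // Sketch: read xattrs, permanently disabling the feature the first time the
    // filesystem reports it is unsupported.
    package main

    import (
        "fmt"
        "sync/atomic"
        "syscall"

        "github.com/pkg/xattr"
    )

    type store struct {
        xattrSupported int32 // 1 = try xattrs, 0 = disabled
    }

    func (s *store) isNotSupported(err error) bool {
        xerr, ok := err.(*xattr.Error)
        if !ok {
            return false
        }
        if xerr.Err == syscall.ENOTSUP || xerr.Err == syscall.EINVAL || xerr.Err == xattr.ENOATTR {
            atomic.StoreInt32(&s.xattrSupported, 0)
            return true
        }
        return false
    }

    func (s *store) list(path string) ([]string, error) {
        if atomic.LoadInt32(&s.xattrSupported) == 0 {
            return nil, nil
        }
        names, err := xattr.LList(path)
        if err != nil {
            if s.isNotSupported(err) {
                return nil, nil // treat as "no metadata" rather than an error
            }
            return nil, err
        }
        return names, nil
    }

    func main() {
        s := &store{xattrSupported: 1}
        names, err := s.list("/etc/hosts")
        fmt.Println(names, err)
    }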

View File

@@ -69,6 +69,11 @@ func (w *BinWriter) WritePu64(val int64) {
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}
// WriteP64 writes a signed long as an unsigned varint
func (w *BinWriter) WriteP64(val int64) {
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}
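The cast to uint64 before PutUvarint means a negative Unix time (a pre-1970 mtime, which the mailru change further down stops clamping to zero) survives the encoding as long as the reader converts back with an int64 cast. A small illustrative sketch, not part of the diff:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	modTime := int64(-1234567890) // a pre-1970 timestamp
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, uint64(modTime)) // same cast as WriteP64 above
	decoded, _ := binary.Uvarint(buf[:n])
	fmt.Println(int64(decoded) == modTime) // true - two's complement survives the round trip
}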
// WriteString writes a zero-terminated string
func (w *BinWriter) WriteString(str string) {
buf := []byte(str)

View File

@@ -1,3 +1,4 @@
// Package api provides types used by the Mail.ru API.
package api
import (

View File

@@ -1,3 +1,4 @@
// Package mailru provides an interface to the Mail.ru Cloud storage system.
package mailru
import (
@@ -17,7 +18,6 @@ import (
"encoding/hex"
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
@@ -85,13 +85,19 @@ func init() {
Name: "mailru",
Description: "Mail.ru Cloud",
NewFs: NewFs,
Options: []fs.Option{{
Name: "user",
Help: "User name (usually email).",
Required: true,
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "user",
Help: "User name (usually email).",
Required: true,
Sensitive: true,
}, {
Name: "pass",
Help: "Password.",
Name: "pass",
Help: `Password.
This must be an app password - rclone will not work with your normal
password. See the Configuration section in the docs for how to make an
app password.
`,
Required: true,
IsPassword: true,
}, {
@@ -208,7 +214,7 @@ Supported quirks: atomicmkdir binlist unknowndirs`,
encoder.EncodeWin | // :?"*<>|
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}},
}}...),
})
}
@@ -640,12 +646,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
return nil, -1, err
}
mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
modTime := time.Unix(mTime, 0)
modTime := time.Unix(int64(item.Mtime), 0)
isDir, err := f.isDir(item.Kind, remote)
if err != nil {
@@ -1659,7 +1660,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Attempt to put by calculating hash in memory
if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
fileBuf, err = ioutil.ReadAll(in)
fileBuf, err = io.ReadAll(in)
if err != nil {
return err
}
@@ -1702,7 +1703,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if size <= mrhash.Size {
// Optimize upload: skip extra request if data fits in the hash buffer.
if fileBuf == nil {
fileBuf, err = ioutil.ReadAll(wrapIn)
fileBuf, err = io.ReadAll(wrapIn)
}
if fileHash == nil && err == nil {
fileHash = mrhash.Sum(fileBuf)
@@ -2057,7 +2058,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
req.WritePu16(0) // revision
req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
req.WritePu64(o.size)
req.WritePu64(o.modTime.Unix())
req.WriteP64(o.modTime.Unix())
req.WritePu32(0)
req.Write(o.mrHash)
@@ -2213,7 +2214,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
fs.Debugf(o, "Server returned full content instead of range")
if start > 0 {
// Discard the beginning of the data
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
_, err = io.CopyN(io.Discard, wrapStream, start)
if err != nil {
closeBody(res)
return nil, err

View File

@@ -58,9 +58,10 @@ func init() {
Description: "Mega",
NewFs: NewFs,
Options: []fs.Option{{
Name: "user",
Help: "User name.",
Required: true,
Name: "user",
Help: "User name.",
Required: true,
Sensitive: true,
}, {
Name: "pass",
Help: "Password.",
@@ -83,6 +84,17 @@ than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`,
Default: false,
Advanced: true,
}, {
Name: "use_https",
Help: `Use HTTPS for transfers.
MEGA uses plain text HTTP connections by default.
Some ISPs throttle HTTP connections, which causes transfers to become very slow.
Enabling this will force MEGA to use HTTPS for all transfers.
HTTPS is normally not necessary since all data is already encrypted anyway.
Enabling it will increase CPU usage and add network overhead.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -100,6 +112,7 @@ type Options struct {
Pass string `config:"pass"`
Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"`
UseHTTPS bool `config:"use_https"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -204,6 +217,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if srv == nil {
srv = mega.New().SetClient(fshttp.NewClient(ctx))
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
srv.SetHTTPS(opt.UseHTTPS)
srv.SetLogger(func(format string, v ...interface{}) {
fs.Infof("*go-mega*", format, v...)
})
@@ -347,7 +361,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
}
}
if err != nil {
return nil, fmt.Errorf("internal error: mkdir called with non-existent root node: %w", err)
return nil, fmt.Errorf("internal error: mkdir called with nonexistent root node: %w", err)
}
// i is number of directories to create (may be 0)
// node is directory to create them from
@@ -387,7 +401,7 @@ func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
return f._rootNode, nil
}
// Check for pre-existing root
// Check for preexisting root
absRoot := f.srv.FS.GetRoot()
node, err := f.findDir(absRoot, f.root)
//log.Printf("findRoot findDir %p %v", node, err)

View File

@@ -8,7 +8,6 @@ import (
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"path"
"strings"
"sync"
@@ -575,7 +574,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
data = data[:limit]
}
return ioutil.NopCloser(bytes.NewBuffer(data)), nil
return io.NopCloser(bytes.NewBuffer(data)), nil
}
// Update the object with the contents of the io.Reader, modTime and size
@@ -583,7 +582,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split()
data, err := ioutil.ReadAll(in)
data, err := io.ReadAll(in)
if err != nil {
return fmt.Errorf("failed to update memory object: %w", err)
}

View File

@@ -12,7 +12,6 @@ import (
"fmt"
gohash "hash"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
@@ -66,11 +65,13 @@ HTTP is provided primarily for debugging purposes.`,
Help: `Domain+path of NetStorage host to connect to.
Format should be ` + "`<domain>/<internal folders>`",
Required: true,
Required: true,
Sensitive: true,
}, {
Name: "account",
Help: "Set the NetStorage account name",
Required: true,
Name: "account",
Help: "Set the NetStorage account name",
Required: true,
Sensitive: true,
}, {
Name: "secret",
Help: `Set the NetStorage account secret/G2O key for authentication.
@@ -118,7 +119,7 @@ type Fs struct {
filetype string // dir, file or symlink
dirscreated map[string]bool // if implicit dir has been created already
dirscreatedMutex sync.Mutex // mutex to protect dirscreated
statcache map[string][]File // cache successfull stat requests
statcache map[string][]File // cache successful stat requests
statcacheMutex sync.RWMutex // RWMutex to protect statcache
}
@@ -424,7 +425,7 @@ func (f *Fs) getFileName(file *File) string {
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.filetype == "" {
// This happens in two scenarios.
// 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs.
// 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
// 2. List/ListR is called from the context of test_all and not the regular rclone binary.
err := f.initFs(ctx, dir)
if err != nil {
@@ -488,7 +489,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.filetype == "" {
// This happens in two scenarios.
// 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs.
// 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
// 2. List/ListR is called from the context of test_all and not the regular rclone binary.
err := f.initFs(ctx, dir)
if err != nil {
@@ -820,6 +821,8 @@ func (f *Fs) getAuth(req *http.Request) error {
// Set Authorization header
dataHeader := generateDataHeader(f)
path := req.URL.RequestURI()
//lint:ignore SA1008 false positive when running staticcheck, the header name is according to docs even if not canonical
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1008
actionHeader := req.Header["X-Akamai-ACS-Action"][0]
fs.Debugf(nil, "NetStorage API %s call %s for path %q", req.Method, actionHeader, path)
req.Header.Set("X-Akamai-ACS-Auth-Data", dataHeader)
@@ -972,7 +975,7 @@ func (o *Object) netStorageUploadRequest(ctx context.Context, in io.Reader, src
URL = o.fs.url(src.Remote())
}
if strings.HasSuffix(URL, ".rclonelink") {
bits, err := ioutil.ReadAll(in)
bits, err := io.ReadAll(in)
if err != nil {
return err
}
@@ -1058,7 +1061,7 @@ func (o *Object) netStorageDownloadRequest(ctx context.Context, options []fs.Ope
if strings.HasSuffix(URL, ".rclonelink") && o.target != "" {
fs.Infof(nil, "Converting a symlink to the rclonelink file on download %q", URL)
reader := strings.NewReader(o.target)
readcloser := ioutil.NopCloser(reader)
readcloser := io.NopCloser(reader)
return readcloser, nil
}

View File

@@ -1,5 +1,4 @@
// Types passed and returned to and from the API
// Package api provides types used by the OneDrive API.
package api
import (
@@ -14,7 +13,7 @@ const (
PackageTypeOneNote = "oneNote"
)
// Error is returned from one drive when things go wrong
// Error is returned from OneDrive when things go wrong
type Error struct {
ErrorInfo struct {
Code string `json:"code"`
@@ -71,7 +70,7 @@ type Drive struct {
Quota Quota `json:"quota"`
}
// Timestamp represents represents date and time information for the
// Timestamp represents date and time information for the
// OneDrive API, by using ISO 8601 and is always in UTC time.
type Timestamp time.Time
@@ -127,6 +126,7 @@ type HashesType struct {
Sha1Hash string `json:"sha1Hash"` // hex encoded SHA1 hash for the contents of the file (if available)
Crc32Hash string `json:"crc32Hash"` // hex encoded CRC32 value of the file (if available)
QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
Sha256Hash string `json:"sha256Hash"` // hex encoded SHA256 value of the file (if available)
}
// FileFacet groups file-related data on OneDrive into a single structure.

View File

@@ -131,10 +131,11 @@ Note that the chunks will be buffered into memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "drive_id",
Help: "The ID of the drive to use.",
Default: "",
Advanced: true,
Name: "drive_id",
Help: "The ID of the drive to use.",
Default: "",
Advanced: true,
Sensitive: true,
}, {
Name: "drive_type",
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
@@ -148,7 +149,8 @@ This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get
there through a path traversal.
`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "access_scopes",
Help: `Set scopes to be requested by rclone.
@@ -196,7 +198,9 @@ listing, set this option.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server-side operations (e.g. copy) to work across different onedrive configs.
Help: `Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different onedrive configs.
This will only work if you are copying between two OneDrive *Personal* drives AND
the files to copy are already shared between them. In other cases, rclone will
@@ -257,6 +261,67 @@ this flag there.
Help: `Set the password for links created by the link command.
At the time of writing this only works with OneDrive personal paid accounts.
`,
Advanced: true,
Sensitive: true,
}, {
Name: "hash_type",
Default: "auto",
Help: `Specify the hash in use for the backend.
This specifies the hash type in use. If set to "auto" it will use the
default hash which is QuickXorHash.
Before rclone 1.62 an SHA1 hash was used by default for OneDrive
Personal. For 1.62 and later the default is to use a QuickXorHash for
all OneDrive types. If an SHA1 hash is desired then set this option
accordingly.
From July 2023 QuickXorHash will be the only available hash for
both OneDrive for Business and OneDrive Personal.
This can be set to "none" to not use any hashes.
If the hash requested does not exist on the object, it will be
returned as an empty string which is treated as a missing hash by
rclone.
`,
Examples: []fs.OptionExample{{
Value: "auto",
Help: "Rclone chooses the best hash",
}, {
Value: "quickxor",
Help: "QuickXor",
}, {
Value: "sha1",
Help: "SHA1",
}, {
Value: "sha256",
Help: "SHA256",
}, {
Value: "crc32",
Help: "CRC32",
}, {
Value: "none",
Help: "None - don't use any hashes",
}},
Advanced: true,
}, {
Name: "av_override",
Default: false,
Help: `Allows download of files the server thinks have a virus.
The onedrive/sharepoint server may check files uploaded with an Anti
Virus checker. If it detects any potential viruses or malware it will
block download of the file.
In this case you will see a message like this
server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden:
If you are 100% sure you want to download this file anyway then use
the --onedrive-av-override flag, or av_override = true in the config
file.
`,
Advanced: true,
}, {
@@ -511,7 +576,7 @@ Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
`)
case "url_end":
siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
re := regexp.MustCompile(`https://.*\.sharepoint\.com/sites/(.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
@@ -597,25 +662,28 @@ type Options struct {
LinkScope string `config:"link_scope"`
LinkType string `config:"link_type"`
LinkPassword string `config:"link_password"`
HashType string `config:"hash_type"`
AVOverride bool `config:"av_override"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote one drive
// Fs represents a remote OneDrive
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the OneDrive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
driveID string // ID to use for querying Microsoft Graph
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
hashType hash.Type // type of the hash we are using
}
// Object describes a one drive object
// Object describes a OneDrive object
//
// Will definitely have info but maybe not meta
type Object struct {
@@ -626,8 +694,7 @@ type Object struct {
size int64 // size of the object
modTime time.Time // modification time of the object
id string // ID of the object
sha1 string // SHA-1 of the object content
quickxorhash string // QuickXorHash of the object content
hash string // Hash of the content, usually QuickXorHash but set as hash_type
mimeType string // Content-Type of object from server (may not be as uploaded)
}
@@ -645,7 +712,7 @@ func (f *Fs) Root() string {
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("One drive root '%s'", f.root)
return fmt.Sprintf("OneDrive root '%s'", f.root)
}
// Features returns the optional features of this Fs
@@ -653,7 +720,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a one drive 'url'
// parsePath parses a OneDrive 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
@@ -882,6 +949,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
driveType: opt.DriveType,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
hashType: QuickXorHashType,
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -891,6 +959,21 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
// Set the user defined hash
if opt.HashType == "auto" || opt.HashType == "" {
opt.HashType = QuickXorHashType.String()
}
err = f.hashType.Set(opt.HashType)
if err != nil {
return nil, err
}
// Disable change polling in China region
// See: https://github.com/rclone/rclone/issues/6444
if f.opt.Region == regionCN {
f.features.ChangeNotify = nil
}
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, _, err := f.readMetaDataForPath(ctx, "")
@@ -1550,10 +1633,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
if f.driveType == driveTypePersonal {
return hash.Set(hash.SHA1)
}
return hash.Set(QuickXorHashType)
return hash.Set(f.hashType)
}
// PublicLink returns a link for downloading without account.
@@ -1668,6 +1748,10 @@ func (f *Fs) CleanUp(ctx context.Context) error {
token := make(chan struct{}, f.ci.Checkers)
var wg sync.WaitGroup
err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
if err != nil {
fs.Errorf(f, "Failed to list %q: %v", path, err)
return nil
}
err = entries.ForObjectError(func(obj fs.Object) error {
o, ok := obj.(*Object)
if !ok {
@@ -1762,14 +1846,8 @@ func (o *Object) rootPath() string {
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if o.fs.driveType == driveTypePersonal {
if t == hash.SHA1 {
return o.sha1, nil
}
} else {
if t == QuickXorHashType {
return o.quickxorhash, nil
}
if t == o.fs.hashType {
return o.hash, nil
}
return "", hash.ErrUnsupported
}
@@ -1800,16 +1878,23 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
file := info.GetFile()
if file != nil {
o.mimeType = file.MimeType
if file.Hashes.Sha1Hash != "" {
o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
}
if file.Hashes.QuickXorHash != "" {
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
if err != nil {
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
} else {
o.quickxorhash = hex.EncodeToString(h)
o.hash = ""
switch o.fs.hashType {
case QuickXorHashType:
if file.Hashes.QuickXorHash != "" {
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
if err != nil {
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
} else {
o.hash = hex.EncodeToString(h)
}
}
case hash.SHA1:
o.hash = strings.ToLower(file.Hashes.Sha1Hash)
case hash.SHA256:
o.hash = strings.ToLower(file.Hashes.Sha256Hash)
case hash.CRC32:
o.hash = strings.ToLower(file.Hashes.Crc32Hash)
}
}
fileSystemInfo := info.GetFileSystemInfo()
@@ -1905,12 +1990,20 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var resp *http.Response
opts := o.fs.newOptsCall(o.id, "GET", "/content")
opts.Options = options
if o.fs.opt.AVOverride {
opts.Parameters = url.Values{"AVOverride": {"1"}}
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
})
if err != nil {
if resp != nil {
if virus := resp.Header.Get("X-Virus-Infected"); virus != "" {
err = fmt.Errorf("server reports this file is infected with a virus - use --onedrive-av-override to download anyway: %s: %w", virus, err)
}
}
return nil, err
}
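A hedged usage sketch for the two new options above. The remote name and paths are illustrative; the flag spellings follow rclone's usual --<backend>-<option> convention, and --onedrive-av-override is the spelling the help text itself uses:

# list SHA-1 sums instead of the default QuickXorHash
rclone hashsum sha1 --onedrive-hash-type sha1 onedrive:Documents

# fetch a file the server has flagged as infected anyway
rclone copy --onedrive-av-override onedrive:quarantine/report.xlsx /tmp/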

View File

@@ -7,51 +7,40 @@
// See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
package quickxorhash
// This code was ported from the code snippet linked from
// https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
// Which has the copyright
// This code was ported from a fast C-implementation from
// https://github.com/namazso/QuickXorHash
// which is licensed under the BSD Zero Clause License
//
// BSD Zero Clause License
//
// Copyright (c) 2022 namazso <admin@namazso.eu>
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
// AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
// OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
// ------------------------------------------------------------------------------
// Copyright (c) 2016 Microsoft Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// ------------------------------------------------------------------------------
import (
"hash"
)
import "hash"
const (
// BlockSize is the preferred size for hashing
BlockSize = 64
// Size of the output checksum
Size = 20
bitsInLastCell = 32
shift = 11
widthInBits = 8 * Size
dataSize = (widthInBits-1)/64 + 1
Size = 20
shift = 11
widthInBits = 8 * Size
dataSize = shift * widthInBits
)
type quickXorHash struct {
data [dataSize]uint64
lengthSoFar uint64
shiftSoFar int
data [dataSize]byte
size uint64
}
// New returns a new hash.Hash computing the quickXorHash checksum.
@@ -70,94 +59,37 @@ func New() hash.Hash {
//
// Implementations must not retain p.
func (q *quickXorHash) Write(p []byte) (n int, err error) {
currentshift := q.shiftSoFar
// The bitvector where we'll start xoring
vectorArrayIndex := currentshift / 64
// The position within the bit vector at which we begin xoring
vectorOffset := currentshift % 64
iterations := len(p)
if iterations > widthInBits {
iterations = widthInBits
var i int
// fill the remainder of the data buffer left over from the previous Write
lastRemain := q.size % dataSize
if lastRemain != 0 {
i += xorBytes(q.data[lastRemain:], p)
}
for i := 0; i < iterations; i++ {
isLastCell := vectorArrayIndex == len(q.data)-1
var bitsInVectorCell int
if isLastCell {
bitsInVectorCell = bitsInLastCell
} else {
bitsInVectorCell = 64
}
// There's at least 2 bitvectors before we reach the end of the array
if vectorOffset <= bitsInVectorCell-8 {
for j := i; j < len(p); j += widthInBits {
q.data[vectorArrayIndex] ^= uint64(p[j]) << uint(vectorOffset)
}
} else {
index1 := vectorArrayIndex
var index2 int
if isLastCell {
index2 = 0
} else {
index2 = vectorArrayIndex + 1
}
low := byte(bitsInVectorCell - vectorOffset)
xoredByte := byte(0)
for j := i; j < len(p); j += widthInBits {
xoredByte ^= p[j]
}
q.data[index1] ^= uint64(xoredByte) << uint(vectorOffset)
q.data[index2] ^= uint64(xoredByte) >> low
}
vectorOffset += shift
for vectorOffset >= bitsInVectorCell {
if isLastCell {
vectorArrayIndex = 0
} else {
vectorArrayIndex = vectorArrayIndex + 1
}
vectorOffset -= bitsInVectorCell
if i != len(p) {
for len(p)-i >= dataSize {
i += xorBytes(q.data[:], p[i:])
}
xorBytes(q.data[:], p[i:])
}
// Update the starting position in a circular shift pattern
q.shiftSoFar = (q.shiftSoFar + shift*(len(p)%widthInBits)) % widthInBits
q.lengthSoFar += uint64(len(p))
q.size += uint64(len(p))
return len(p), nil
}
// Calculate the current checksum
func (q *quickXorHash) checkSum() (h [Size]byte) {
// Output the data as little endian bytes
ph := 0
for i := 0; i < len(q.data)-1; i++ {
d := q.data[i]
_ = h[ph+7] // bounds check
h[ph+0] = byte(d >> (8 * 0))
h[ph+1] = byte(d >> (8 * 1))
h[ph+2] = byte(d >> (8 * 2))
h[ph+3] = byte(d >> (8 * 3))
h[ph+4] = byte(d >> (8 * 4))
h[ph+5] = byte(d >> (8 * 5))
h[ph+6] = byte(d >> (8 * 6))
h[ph+7] = byte(d >> (8 * 7))
ph += 8
func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
for i := 0; i < dataSize; i++ {
shift := (i * 11) % 160
shiftBytes := shift / 8
shiftBits := shift % 8
shifted := int(q.data[i]) << shiftBits
h[shiftBytes] ^= byte(shifted)
h[shiftBytes+1] ^= byte(shifted >> 8)
}
// remaining 32 bits
d := q.data[len(q.data)-1]
h[Size-4] = byte(d >> (8 * 0))
h[Size-3] = byte(d >> (8 * 1))
h[Size-2] = byte(d >> (8 * 2))
h[Size-1] = byte(d >> (8 * 3))
h[0] ^= h[20]
// XOR the file length with the least significant bits in little endian format
d = q.lengthSoFar
d := q.size
h[Size-8] ^= byte(d >> (8 * 0))
h[Size-7] ^= byte(d >> (8 * 1))
h[Size-6] ^= byte(d >> (8 * 2))
@@ -174,7 +106,7 @@ func (q *quickXorHash) checkSum() (h [Size]byte) {
// It does not change the underlying hash state.
func (q *quickXorHash) Sum(b []byte) []byte {
hash := q.checkSum()
return append(b, hash[:]...)
return append(b, hash[:Size]...)
}
// Reset resets the Hash to its initial state.
@@ -196,8 +128,10 @@ func (q *quickXorHash) BlockSize() int {
}
// Sum returns the quickXorHash checksum of the data.
func Sum(data []byte) [Size]byte {
func Sum(data []byte) (h [Size]byte) {
var d quickXorHash
_, _ = d.Write(data)
return d.checkSum()
s := d.checkSum()
copy(h[:], s[:])
return h
}
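A short usage sketch for the rewritten package (assuming its import path inside rclone; the input data is illustrative). It prints the digest both as lowercase hex, the form rclone stores internally, and as base64, the form the OneDrive API reports:

package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"

	"github.com/rclone/rclone/backend/onedrive/quickxorhash"
)

func main() {
	sum := quickxorhash.Sum([]byte("hello world")) // [20]byte digest
	fmt.Println(hex.EncodeToString(sum[:]))
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}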

View File

@@ -4,6 +4,7 @@ import (
"encoding/base64"
"fmt"
"hash"
"math/rand"
"testing"
"github.com/stretchr/testify/assert"
@@ -166,3 +167,16 @@ func TestReset(t *testing.T) {
// check interface
var _ hash.Hash = (*quickXorHash)(nil)
func BenchmarkQuickXorHash(b *testing.B) {
b.SetBytes(1 << 20)
buf := make([]byte, 1<<20)
rand.Read(buf)
h := New()
b.ResetTimer()
for i := 0; i < b.N; i++ {
h.Reset()
h.Write(buf)
h.Sum(nil)
}
}

View File

@@ -0,0 +1,20 @@
//go:build !go1.20
package quickxorhash
func xorBytes(dst, src []byte) int {
n := len(dst)
if len(src) < n {
n = len(src)
}
if n == 0 {
return 0
}
dst = dst[:n]
//src = src[:n]
src = src[:len(dst)] // remove bounds check in loop
for i := range dst {
dst[i] ^= src[i]
}
return n
}

View File

@@ -0,0 +1,9 @@
//go:build go1.20
package quickxorhash
import "crypto/subtle"
func xorBytes(dst, src []byte) int {
return subtle.XORBytes(dst, src, dst)
}
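The two build-tagged xorBytes variants should agree. A quick sketch (assumes Go 1.20+ for crypto/subtle.XORBytes) comparing a condensed copy of the fallback loop with the subtle call used above:

package main

import (
	"bytes"
	"crypto/subtle"
	"fmt"
)

// condensed copy of the pre-go1.20 fallback
func xorBytesLoop(dst, src []byte) int {
	n := len(dst)
	if len(src) < n {
		n = len(src)
	}
	for i := 0; i < n; i++ {
		dst[i] ^= src[i]
	}
	return n
}

func main() {
	src := []byte{4, 3, 2, 1}
	a := []byte{1, 2, 3, 4}
	b := []byte{1, 2, 3, 4}
	xorBytesLoop(a, src)
	subtle.XORBytes(b, src, b) // dst = src XOR dst, as in the go1.20 build
	fmt.Println(bytes.Equal(a, b)) // true
}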

View File

@@ -1,3 +1,4 @@
// Package opendrive provides an interface to the OpenDrive storage system.
package opendrive
import (
@@ -41,9 +42,10 @@ func init() {
Description: "OpenDrive",
NewFs: NewFs,
Options: []fs.Option{{
Name: "username",
Help: "Username.",
Required: true,
Name: "username",
Help: "Username.",
Required: true,
Sensitive: true,
}, {
Name: "password",
Help: "Password.",

View File

@@ -0,0 +1,145 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"os"
"strings"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
)
const (
sseDefaultAlgorithm = "AES256"
)
func getSha256(p []byte) []byte {
h := sha256.New()
h.Write(p)
return h.Sum(nil)
}
func validateSSECustomerKeyOptions(opt *Options) error {
if opt.SSEKMSKeyID != "" && (opt.SSECustomerKeyFile != "" || opt.SSECustomerKey != "") {
return errors.New("oos: can't use vault sse_kms_key_id and local sse_customer_key at the same time")
}
if opt.SSECustomerKey != "" && opt.SSECustomerKeyFile != "" {
return errors.New("oos: can't use sse_customer_key and sse_customer_key_file at the same time")
}
if opt.SSEKMSKeyID != "" {
return nil
}
err := populateSSECustomerKeys(opt)
if err != nil {
return err
}
return nil
}
func populateSSECustomerKeys(opt *Options) error {
if opt.SSECustomerKeyFile != "" {
// Reads the base64-encoded AES key data from the specified file and computes its SHA256 checksum
data, err := os.ReadFile(expandPath(opt.SSECustomerKeyFile))
if err != nil {
return fmt.Errorf("oos: error reading sse_customer_key_file: %v", err)
}
opt.SSECustomerKey = strings.TrimSpace(string(data))
}
if opt.SSECustomerKey != "" {
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKey)
if err != nil {
return fmt.Errorf("oos: Could not decode sse_customer_key_file: %w", err)
}
sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
if opt.SSECustomerKeySha256 == "" {
opt.SSECustomerKeySha256 = sha256Checksum
} else {
if opt.SSECustomerKeySha256 != sha256Checksum {
return fmt.Errorf("the computed SHA256 checksum "+
"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
sha256Checksum, opt.SSECustomerKeySha256)
}
}
if opt.SSECustomerAlgorithm == "" {
opt.SSECustomerAlgorithm = sseDefaultAlgorithm
}
}
return nil
}
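A hedged helper sketch showing how a key compatible with sse_customer_key / sse_customer_key_file could be produced, with the matching sse_customer_key_sha256 computed the same way as getSha256 above (base64 of the SHA256 of the raw 256-bit key). This is purely illustrative and not part of the backend:

package main

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	key := make([]byte, 32) // raw 256-bit key for AES256
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	sum := sha256.Sum256(key)
	fmt.Println("sse_customer_key        =", base64.StdEncoding.EncodeToString(key))
	fmt.Println("sse_customer_key_sha256 =", base64.StdEncoding.EncodeToString(sum[:]))
}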
// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/usingyourencryptionkeys.htm
func useBYOKPutObject(fs *Fs, request *objectstorage.PutObjectRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
func useBYOKHeadObject(fs *Fs, request *objectstorage.HeadObjectRequest) {
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
func useBYOKGetObject(fs *Fs, request *objectstorage.GetObjectRequest) {
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}

View File

@@ -0,0 +1,178 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"crypto/rsa"
"errors"
"net/http"
"os"
"path"
"strings"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/common/auth"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
)
func expandPath(filepath string) (expandedPath string) {
if filepath == "" {
return filepath
}
cleanedPath := path.Clean(filepath)
expandedPath = cleanedPath
if strings.HasPrefix(cleanedPath, "~") {
rest := cleanedPath[2:]
home, err := os.UserHomeDir()
if err != nil {
return expandedPath
}
expandedPath = path.Join(home, rest)
}
return
}
func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error) {
switch opt.Provider {
case instancePrincipal:
return auth.InstancePrincipalConfigurationProvider()
case userPrincipal:
expandConfigFilePath := expandPath(opt.ConfigFile)
if expandConfigFilePath != "" && !fileExists(expandConfigFilePath) {
fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", expandConfigFilePath)
}
return common.CustomProfileConfigProvider(expandConfigFilePath, opt.ConfigProfile), nil
case resourcePrincipal:
return auth.ResourcePrincipalConfigurationProvider()
case noAuth:
fs.Infof("client", "using no auth provider")
return getNoAuthConfiguration()
default:
}
return common.DefaultConfigProvider(), nil
}
func newObjectStorageClient(ctx context.Context, opt *Options) (*objectstorage.ObjectStorageClient, error) {
p, err := getConfigurationProvider(opt)
if err != nil {
return nil, err
}
client, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(p)
if err != nil {
fs.Errorf(opt.Provider, "failed to create object storage client, %v", err)
return nil, err
}
if opt.Region != "" {
client.SetRegion(opt.Region)
}
modifyClient(ctx, opt, &client.BaseClient)
return &client, err
}
func fileExists(filePath string) bool {
if _, err := os.Stat(filePath); errors.Is(err, os.ErrNotExist) {
return false
}
return true
}
func modifyClient(ctx context.Context, opt *Options, client *common.BaseClient) {
client.HTTPClient = getHTTPClient(ctx)
if opt.Provider == noAuth {
client.Signer = getNoAuthSigner()
}
}
// getClient makes http client according to the global options
// this has rclone specific options support like dump headers, body etc.
func getHTTPClient(ctx context.Context) *http.Client {
return fshttp.NewClient(ctx)
}
var retryErrorCodes = []int{
408, // Request Timeout
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
503, // Service Unavailable
504, // Gateway Time-out
}
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// If this is an ocierr object, try and extract more useful information to determine if we should retry
if ociError, ok := err.(common.ServiceError); ok {
// Simple case, check the original embedded error in case it's generically retryable
if fserrors.ShouldRetry(err) {
return true, err
}
// If it is a timeout then we want to retry that
if ociError.GetCode() == "RequestTimeout" {
return true, err
}
}
// Ok, not an oci error, check for generic failure conditions
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
func getNoAuthConfiguration() (common.ConfigurationProvider, error) {
return &noAuthConfigurator{}, nil
}
func getNoAuthSigner() common.HTTPRequestSigner {
return &noAuthSigner{}
}
type noAuthConfigurator struct {
}
type noAuthSigner struct {
}
func (n *noAuthSigner) Sign(*http.Request) error {
return nil
}
func (n *noAuthConfigurator) PrivateRSAKey() (*rsa.PrivateKey, error) {
return nil, nil
}
func (n *noAuthConfigurator) KeyID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) TenancyOCID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) UserOCID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) KeyFingerprint() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) Region() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) AuthType() (common.AuthConfig, error) {
return common.AuthConfig{
AuthType: common.UnknownAuthenticationType,
IsFromConfigFile: false,
OboToken: nil,
}, nil
}
// Check the interfaces are satisfied
var (
_ common.ConfigurationProvider = &noAuthConfigurator{}
_ common.HTTPRequestSigner = &noAuthSigner{}
)

View File

@@ -0,0 +1,228 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"fmt"
"strings"
"time"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
)
// ------------------------------------------------------------
// Command Interface Implementation
// ------------------------------------------------------------
const (
operationRename = "rename"
operationListMultiPart = "list-multipart-uploads"
operationCleanup = "cleanup"
)
var commandHelp = []fs.CommandHelp{{
Name: operationRename,
Short: "change the name of an object",
Long: `This command can be used to rename an object.
Usage Examples:
rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
`,
Opts: nil,
}, {
Name: operationListMultiPart,
Short: "List the unfinished multipart uploads",
Long: `This command lists the unfinished multipart uploads in JSON format.
rclone backend list-multipart-uploads oos:bucket/path/to/object
It returns a dictionary of buckets with values as lists of unfinished
multipart uploads.
You can call it with no bucket in which case it lists all buckets, with
a bucket or with a bucket and path.
{
"test-bucket": [
{
"namespace": "test-namespace",
"bucket": "test-bucket",
"object": "600m.bin",
"uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
"timeCreated": "2022-07-29T06:21:16.595Z",
"storageTier": "Standard"
}
]
}
`,
}, {
Name: operationCleanup,
Short: "Remove unfinished multipart uploads.",
Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
rclone backend cleanup oos:bucket/path/to/object
rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
Opts: map[string]string{
"max-age": "Max age of upload to delete",
},
},
}
/*
Command runs a named command on the backend.
The command to run is given by commandName, args supplies its
positional arguments and opt its optional arguments.
The result should be capable of being JSON encoded.
If it is a string or a []string it will be shown to the user,
otherwise it will be JSON encoded and shown to the user like that.
*/
func (f *Fs) Command(ctx context.Context, commandName string, args []string,
opt map[string]string) (result interface{}, err error) {
// fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
switch commandName {
case operationRename:
if len(args) < 2 {
return nil, fmt.Errorf("path to object or its new name to rename is empty")
}
remote := args[0]
newName := args[1]
return f.rename(ctx, remote, newName)
case operationListMultiPart:
return f.listMultipartUploadsAll(ctx)
case operationCleanup:
maxAge := 24 * time.Hour
if opt["max-age"] != "" {
maxAge, err = fs.ParseDuration(opt["max-age"])
if err != nil {
return nil, fmt.Errorf("bad max-age: %w", err)
}
}
return nil, f.cleanUp(ctx, maxAge)
default:
return nil, fs.ErrorCommandNotFound
}
}
func (f *Fs) rename(ctx context.Context, remote, newName string) (interface{}, error) {
if remote == "" {
return nil, fmt.Errorf("path to object file cannot be empty")
}
if newName == "" {
return nil, fmt.Errorf("the object's new name cannot be empty")
}
o := &Object{
fs: f,
remote: remote,
}
bucketName, objectPath := o.split()
err := o.readMetaData(ctx)
if err != nil {
fs.Errorf(f, "failed to read object:%v %v ", objectPath, err)
if strings.HasPrefix(objectPath, bucketName) {
fs.Errorf(f, "warn: ensure object path: %v is relative to bucket:%v and doesn't include the bucket name",
objectPath, bucketName)
}
return nil, fs.ErrorNotAFile
}
details := objectstorage.RenameObjectDetails{
SourceName: common.String(objectPath),
NewName: common.String(newName),
}
request := objectstorage.RenameObjectRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
RenameObjectDetails: details,
OpcClientRequestId: nil,
RequestMetadata: common.RequestMetadata{},
}
var response objectstorage.RenameObjectResponse
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.RenameObject(ctx, request)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
return nil, err
}
fs.Infof(f, "success: renamed object-path: %v to %v", objectPath, newName)
return "renamed successfully", nil
}
func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*objectstorage.MultipartUpload,
err error) {
uploadsMap = make(map[string][]*objectstorage.MultipartUpload)
bucket, directory := f.split("")
if bucket != "" {
uploads, err := f.listMultipartUploads(ctx, bucket, directory)
if err != nil {
return uploadsMap, err
}
uploadsMap[bucket] = uploads
return uploadsMap, nil
}
entries, err := f.listBuckets(ctx)
if err != nil {
return uploadsMap, err
}
for _, entry := range entries {
bucket := entry.Remote()
uploads, listErr := f.listMultipartUploads(ctx, bucket, "")
if listErr != nil {
err = listErr
fs.Errorf(f, "%v", err)
}
uploadsMap[bucket] = uploads
}
return uploadsMap, err
}
// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix, so it matches
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) {
uploads = []*objectstorage.MultipartUpload{}
req := objectstorage.ListMultipartUploadsRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
}
var response objectstorage.ListMultipartUploadsResponse
for {
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.ListMultipartUploads(ctx, req)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
// fs.Debugf(f, "failed to list multi part uploads %v", err)
return uploads, err
}
for index, item := range response.Items {
if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
continue
}
uploads = append(uploads, &response.Items[index])
}
if response.OpcNextPage == nil {
break
}
req.Page = response.OpcNextPage
}
return uploads, nil
}

View File

@@ -0,0 +1,156 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"fmt"
"strings"
"time"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
)
// ------------------------------------------------------------
// Implement Copier, an optional interface for Fs
//------------------------------------------------------------
// Copy src to this remote using server-side copy operations.
// This is stored with the remote path given
// It returns the destination Object and a possible error
// Will only be called if src.Fs().Name() == f.Name()
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
// fs.Debugf(f, "copying %v to %v", src.Remote(), remote)
srcObj, ok := src.(*Object)
if !ok {
// fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Temporary Object under construction
dstObj := &Object{
fs: f,
remote: remote,
}
err := f.copy(ctx, dstObj, srcObj)
if err != nil {
return nil, err
}
return f.NewObject(ctx, remote)
}
// copy does a server-side copy from dstObj <- srcObj
//
// If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object) (err error) {
srcBucket, srcPath := srcObj.split()
dstBucket, dstPath := dstObj.split()
if dstBucket != srcBucket {
exists, err := f.bucketExists(ctx, dstBucket)
if err != nil {
return err
}
if !exists {
err = f.makeBucket(ctx, dstBucket)
if err != nil {
return err
}
}
}
copyObjectDetails := objectstorage.CopyObjectDetails{
SourceObjectName: common.String(srcPath),
DestinationRegion: common.String(dstObj.fs.opt.Region),
DestinationNamespace: common.String(dstObj.fs.opt.Namespace),
DestinationBucket: common.String(dstBucket),
DestinationObjectName: common.String(dstPath),
DestinationObjectMetadata: metadataWithOpcPrefix(srcObj.meta),
}
req := objectstorage.CopyObjectRequest{
NamespaceName: common.String(srcObj.fs.opt.Namespace),
BucketName: common.String(srcBucket),
CopyObjectDetails: copyObjectDetails,
}
useBYOKCopyObject(f, &req)
var resp objectstorage.CopyObjectResponse
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CopyObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return err
}
workRequestID := resp.OpcWorkRequestId
timeout := time.Duration(f.opt.CopyTimeout)
dstName := dstObj.String()
// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/copyingobjects.htm
// To enable server side copy object, customers will have to
// grant policy to objectstorage service to manage object-family
// Allow service objectstorage-<region_identifier> to manage object-family in tenancy
// Another option to avoid the policy is to download and reupload the file.
// This download/upload workaround only works up to the maximum file size limit of 5 GB
err = copyObjectWaitForWorkRequest(ctx, workRequestID, dstName, timeout, f.srv)
if err != nil {
return err
}
return err
}
func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType string, timeout time.Duration,
client *objectstorage.ObjectStorageClient) error {
stateConf := &StateChangeConf{
Pending: []string{
string(objectstorage.WorkRequestStatusAccepted),
string(objectstorage.WorkRequestStatusInProgress),
string(objectstorage.WorkRequestStatusCanceling),
},
Target: []string{
string(objectstorage.WorkRequestSummaryStatusCompleted),
string(objectstorage.WorkRequestSummaryStatusCanceled),
string(objectstorage.WorkRequestStatusFailed),
},
Refresh: func() (interface{}, string, error) {
getWorkRequestRequest := objectstorage.GetWorkRequestRequest{}
getWorkRequestRequest.WorkRequestId = wID
workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest)
wr := &workRequestResponse.WorkRequest
return workRequestResponse, string(wr.Status), err
},
Timeout: timeout,
}
wrr, e := stateConf.WaitForStateContext(ctx, entityType)
if e != nil {
return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, e)
}
wr := wrr.(objectstorage.GetWorkRequestResponse).WorkRequest
if wr.Status == objectstorage.WorkRequestStatusFailed {
errorMessage, _ := getObjectStorageErrorFromWorkRequest(ctx, wID, client)
return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, errorMessage)
}
return nil
}
func getObjectStorageErrorFromWorkRequest(ctx context.Context, workRequestID *string, client *objectstorage.ObjectStorageClient) (string, error) {
req := objectstorage.ListWorkRequestErrorsRequest{}
req.WorkRequestId = workRequestID
res, err := client.ListWorkRequestErrors(ctx, req)
if err != nil {
return "", err
}
allErrs := make([]string, 0)
for _, errs := range res.Items {
allErrs = append(allErrs, *errs.Message)
}
errorMessage := strings.Join(allErrs, "\n")
return errorMessage, nil
}

Some files were not shown because too many files have changed in this diff.