mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits


421 Commits

Author SHA1 Message Date
Nick Craig-Wood
82b963e372 Version v1.65.0 2023-11-26 16:07:39 +00:00
Nick Craig-Wood
74d5477fad onedrive: add --onedrive-delta flag to enable ListR
Before this change ListR was unconditionally enabled on onedrive.

This caused performance problems for some uses, so now the
--onedrive-delta flag has to be supplied.
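A minimal usage sketch (the remote name and path are illustrative):

```
rclone lsf -R --fast-list --onedrive-delta onedrive:path
```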

Fixes #7362
2023-11-26 16:06:49 +00:00
Nick Craig-Wood
b5857f0bf8 smb: fix modtime of multithread uploads by setting PartialUploads
Before this change PartialUploads was not set. This is clearly wrong
since incoming files are visible on the smb server.

Setting PartialUploads fixes the multithread upload modtime problem as
it uses the PartialUploads flag as an indication that it needs to set
the modtime explicitly.

This problem was detected by the new TestMultithreadCopy integration
tests

Fixes #7411
2023-11-25 18:46:48 +00:00
Nick Craig-Wood
edb5ccdd0b smb: fix about size wrong by switching to github.com/cloudsoda/go-smb2/ fork
Before this change smb drives sometimes showed a fraction of the
correct size using `rclone about`.

This fixes the problem by switching the upstream library from
github.com/hirochachacha/go-smb2 to github.com/cloudsoda/go-smb2 which
has a fix for the problem.

The new library passes the integration tests.

Fixes #6733
2023-11-25 18:45:41 +00:00
Nick Craig-Wood
0244caf13a serve s3: fix overwrite of files with 0 length file
Before this change overwriting an existing file with a 0 length file
didn't update the file size.

This change corrects the issue and makes sure the file is truncated
properly.

This was discovered by the full integration tests.
2023-11-24 20:47:06 +00:00
Nick Craig-Wood
aaa897337d serve s3: fix error handling for listing non-existent prefix - fixes #7455
Before this change serve s3 would return NoSuchKey errors when a
non-existent prefix was listed.

This change fixes it to return an empty list like AWS does.

This was discovered by the full integration tests.
2023-11-24 20:47:06 +00:00
Nick Craig-Wood
e7c002adef test_all: make integration test for serve s3 2023-11-24 20:47:06 +00:00
Nick Craig-Wood
9e62a74a23 Add Abhinav Dhiman to contributors 2023-11-24 20:47:06 +00:00
Nick Craig-Wood
a10abf9934 Add 你知道未来吗 to contributors 2023-11-24 20:47:06 +00:00
Abhinav Dhiman
36eb3cd660 imagekit: Added ImageKit backend 2023-11-24 18:18:01 +00:00
你知道未来吗
fd2322cb41 fs/fshttp: fix --contimeout being ignored
The following command will block for 60s(default) when the network is slow or unavailable:

```
rclone  --contimeout 10s --low-level-retries 0 lsd dropbox:
```

This change will make it timeout after the expected 10s.

Signed-off-by: rkonfj <rkonfj@gmail.com>
2023-11-24 17:53:33 +00:00
Nick Craig-Wood
4eed3ae99a s3: ensure we can set upload cutoff that we use for Rclone provider
This is a workaround to make the new multipart upload integration
tests pass.
2023-11-24 16:32:06 +00:00
Nick Craig-Wood
d8855b21eb serve s3: document multipart copy doesn't work #7454
This puts in a workaround for the tests also
2023-11-24 15:49:33 +00:00
Nick Craig-Wood
8f47b6746d b2: fix streaming chunked files an exact multiple of chunk size
Before this change, streaming files an exact multiple of the chunk
size would cause rclone to attempt to stream a 0 sized chunk which was
rejected by the b2 servers.

This bug was noticed by the new integration tests for chunked streaming.
2023-11-24 14:32:01 +00:00
Nick Craig-Wood
cc2a4c2e20 fstest: factor chunked streaming tests from b2 and use in all backends 2023-11-24 12:58:40 +00:00
Nick Craig-Wood
fabeb8e44e b2: fix server side chunked copy when file size was exactly --b2-copy-cutoff
Before this change the b2 servers would complain as this was only a
single part transfer.

This was noticed by the new integration tests for server side chunked copy.
2023-11-24 12:37:11 +00:00
Nick Craig-Wood
c27977d4d5 fstest: factor chunked copy tests from b2 and use them in s3 and oos 2023-11-24 12:37:11 +00:00
Nick Craig-Wood
d5d28a7513 operations: fix overwrite of destination when multi-thread transfer fails
Before this change, if a multithread upload failed (let's say the
source became unavailable) rclone would finalise the file first before
aborting the transfer.

This caused the partial file to be written which would overwrite any
existing files.

This was fixed by making sure we Abort the transfer before Close-ing
it.

This updates the docs to encourage calling of Abort before Close and
updates writerAtChunkWriter to make sure that works properly.

This also reworks the tests to detect this and to make sure we upload
and download to each multi-thread capable backend (we were only
downloading before which isn't a full test).

Fixes #7071
2023-11-24 11:19:58 +00:00
Nick Craig-Wood
94ccc95515 random: stop using deprecated rand.Seed in go1.20 and later 2023-11-24 11:19:58 +00:00
Nick Craig-Wood
5d5473c8a5 random: speed up String function for generating larger blocks 2023-11-24 11:19:58 +00:00
Nick Craig-Wood
251a8e3c39 hash: allow runtime configuration of supported hashes for testing 2023-11-24 11:19:58 +00:00
Nick Craig-Wood
a259226eb2 Add Alen Šiljak to contributors 2023-11-24 11:19:58 +00:00
Alen Šiljak
5fba502516 http: enable methods used with WebDAV - fixes #7444
Without this, requests like PROPFIND, issued from a browser, fail.
2023-11-23 16:49:03 +00:00
Nick Craig-Wood
ba11040d6b s3: detect looping when using gcs and versions
Apparently gcs doesn't return an S3 compatible result when using
versions.

In particular it doesn't return a NextKeyMarker - this means rclone
loops and fetches the same page over and over again.

This patch detects the problem and stops the infinite retries but it
doesn't fix the underlying problem.

See: https://forum.rclone.org/t/list-s3-versions-files-looping-bug/42974
See: https://issuetracker.google.com/u/0/issues/312292516
2023-11-23 09:50:28 +00:00
Nick Craig-Wood
668711e432 dropbox: fix missing encoding for rclone purge again
This commit fixed the problem but made the integration tests fail.

33376bf399 dropbox: fix missing encoding for rclone purge

This fixes the problem properly by making sure we send the encoded or
non-encoded root to the right places.
2023-11-21 12:23:28 +00:00
Nick Craig-Wood
a71d181cb0 test_all: limit the Zoho tests to just the backend
The free account has a very ungenerous limit of 1000 API calls per day
and the full integration test suite breaches that, so limit the
integration tests to just the backend.
2023-11-21 12:06:31 +00:00
Nick Craig-Wood
cab42107f7 test_all: remove uptobox from integration tests
The uptobox service hasn't been running since 20 September 2023.

This removes it from the integration tests to save noise.
2023-11-21 11:49:39 +00:00
Nick Craig-Wood
1f9a79ef09 operations: use less memory when doing multithread uploads
For uploads coming from disk, going to disk, or going to a backend
which doesn't need to seek except for retries, this no longer buffers
the input.

This dramatically reduces rclone's memory usage.

Fixes #7350
2023-11-20 18:07:05 +00:00
Nick Craig-Wood
c0fb9ebfce operations: make Open() return an io.ReadSeekCloser #7350
As part of reducing memory usage in rclone, we need to have a raw
handle to an object we can seek with.
2023-11-20 18:07:05 +00:00
Nick Craig-Wood
e8fcde8de1 fs: add ChunkWriterDoesntSeek feature flag and set it for b2 2023-11-20 18:07:05 +00:00
Nick Craig-Wood
72dfdd97d8 mockobject: fix SetUnknownSize method to obey parameter passed in 2023-11-20 18:07:05 +00:00
Nick Craig-Wood
bb88b8499b box: fix performance problem reading metadata for single files
Before this change the backend used to list the directory to find the
metadata for a single file. For lots of files in a directory this
caused a serious performance problem.

This change uses the preflight check to check for a file's existence
and find its ID.

See: https://forum.rclone.org/t/psa-box-com-has-serious-performance-issues-in-directories-with-thousands-of-files/41128/10
See: https://forum.box.com/t/is-there-an-api-to-find-a-file-by-leaf-name-given-a-folder-id/997/
See: https://developer.box.com/guides/uploads/check/
2023-11-20 18:07:05 +00:00
Nick Craig-Wood
4ac5cb07ca gcs: fix 400 Bad request errors when using multi-thread copy
Before this change, on every Open, we added the userProject parameter
to the URL in the object.

This meant it grew and grew until Google returned Error 400 (Bad
Request) errors when the URL became too long.

This fixes the problem by adding the userProject parameter once.

See: https://forum.rclone.org/t/endlessly-repeating-userproject-parameter-in-get-to-google-storage-context-canceled-got-http-response-code-400/42652
2023-11-20 18:07:05 +00:00
Nick Craig-Wood
4a3e9bbabf http: implement set backend command to update running backend
See: https://forum.rclone.org/t/updating-the-url-of-http-remote-not-applied-on-mounts/42763
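A hedged sketch of how this might be invoked, assuming the generic
`rclone backend <command> remote:` syntax with `-o key=value` options
(the URL and option name are illustrative):

```
rclone backend set http: -o url=https://new.example.com/
```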
2023-11-20 18:07:05 +00:00
Nick Craig-Wood
33376bf399 dropbox: fix missing encoding for rclone purge
This was causing directories with encodable characters in their names
not to be found on purge.

See: https://forum.rclone.org/t/purge-command-does-not-work-on-directories-with-files/42793
2023-11-20 18:07:05 +00:00
asdffdsazqqq
94b7c49196 Update Docs to show SMB remote supports modtime.md 2023-11-20 17:50:28 +00:00
albertony
a7faf05393 docs: cleanup backend hashes sections 2023-11-20 17:43:57 +00:00
albertony
98a96596df docs: replace mod-time with modtime 2023-11-20 17:43:57 +00:00
Nick Craig-Wood
88bd80c1fa march: Fix excessive parallelism when using --no-traverse
When using `--no-traverse` the march routines call NewObject on each
potential object in the destination.

The concurrency limiter was accidentally arranged so that there were
`--checkers` * `--checkers` NewObject calls going on at once.

This became obvious when using the sftp backend which used too many
connections.

Fixes #5824
2023-11-20 17:36:31 +00:00
Nick Craig-Wood
c6755aa768 Add Mina Galić to contributors 2023-11-20 17:36:31 +00:00
Mina Galić
01be5c75be Makefile: use POSIX compatible install arguments
install -t doesn't exist on BSD.
Flip the arguments since we only have one.
2023-11-20 15:01:26 +00:00
Jacob Hands
20bd17f107 install.sh: Clean up temp files in install script 2023-11-20 15:00:08 +00:00
Nick Craig-Wood
64ec5709fe drive: fix integration tests by enabling metadata support from the context
Before this change, the drive backend only used metadata if it was
created with Metadata enabled.

This patch changes it so the Metadata support is enabled dynamically
if it is set in the context.

This fixes the metadata tests in the integration tests which have been
changed to make sure Metadata is enabled.
2023-11-19 12:48:27 +00:00
Nick Craig-Wood
1ea8678be2 fstests: make sure Metadata is enabled in the context for metadata tests 2023-11-19 12:48:27 +00:00
Nick Craig-Wood
8341de05c6 Refresh CONTRIBUTING.md
- add dos and don'ts section to writing a new backend
- bring markdown up to modern style
2023-11-19 12:48:27 +00:00
Nick Craig-Wood
47ca0c326e fs: implement --metadata-mapper to transform metadata with a user supplied program 2023-11-18 17:49:35 +00:00
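A hedged usage sketch; the mapper path is illustrative and stands in for
any user supplied executable that rewrites the metadata passed to it:

```
rclone copy --metadata --metadata-mapper /usr/local/bin/metadata-mapper src: dst:
```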
Nick Craig-Wood
54196f34e3 drive: fix error updating created time metadata on existing object
Google drive doesn't allow the btime (created time) metadata to be
updated when updating an existing object.

This change skips btime metadata if we are updating an existing
object but allows it otherwise.
2023-11-18 17:49:35 +00:00
Nick Craig-Wood
9fdf3d548a drive: add read/write metadata support
- fetch metadata with listings and fetch permissions in parallel
- only write permissions out if they are not inherited.
- make setting labels, owner and permissions work, controlled by the flags below (see the usage sketch after this list)
    - `--drive-metadata-labels`, `--drive-metadata-owner`, `--drive-metadata-permissions`
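A hedged usage sketch, assuming these flags take read/write style values
as described in the drive docs (remotes and values are illustrative):

```
rclone copy --metadata --drive-metadata-owner write --drive-metadata-permissions write drive:src drive:dst
```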
2023-11-18 17:49:35 +00:00
Nick Craig-Wood
10774d297a Add moongdal to contributors 2023-11-18 17:49:35 +00:00
Nick Craig-Wood
bf9053705d Add viktor to contributors 2023-11-18 17:49:35 +00:00
Nick Craig-Wood
0bd059ec55 Add karan to contributors 2023-11-18 17:49:35 +00:00
Nick Craig-Wood
59d363b3c1 Add Oksana Zhykina to contributors 2023-11-18 17:49:35 +00:00
Nick Craig-Wood
94a5de58c8 linkbox: pre-merge fixes
- convert to directoryCache - makes backend much more efficient
- don't force --low-level-retries to 2
- don't wrap paced calls in pacer
- fix shouldRetry
- fix file list searching mechanism
2023-11-18 17:14:45 +00:00
viktor
a466ababd0 backend: add Linkbox backend
Add backend for linkbox.io with read and write capabilities

fixes #6960 #6629
2023-11-18 17:14:45 +00:00
Nick Craig-Wood
168d577297 vfs: error out early if can't upload 0 length file
Before this change if a backend can't upload 0 length files and
`--vfs-cache-mode writes` was in use then the writeback logic would
try to upload the 0 length file forever.

This change causes it to exit on the first failure to upload.
2023-11-18 17:14:45 +00:00
Nick Craig-Wood
ddaf01ece9 azurefiles: finish docs and implementation and add optional interfaces
- use rclone's http Transport
- fix handling of 0 length files
- combine into one file and remove unneeded abstraction
- make `chunk_size` and `upload_concurrency` settable
- make auth the same as azureblob
- set the Features correctly
- implement `--azurefiles-max-stream-size`
- remove arbitrary sleep on Mkdir
- implement `--header-upload`
- implement read and write MimeType for objects
- implement optional methods
    - About
    - Copy
    - DirMove
    - Move
    - OpenWriterAt
    - PutStream
- finish documentation
- disable build on plan9 and js

Fixes #365
Fixes #7378
2023-11-18 16:48:23 +00:00
karan
b5301e03a6 Implement Azure Files backend
Co-authored-by: moongdal <moongdal@tutanota.com>
2023-11-18 16:42:13 +00:00
Dimitri Papadopoulos
e9763552f7 fs: fix a typo in a comment 2023-11-16 17:15:00 +00:00
Oksana Zhykina
6b60e09ff2 quatrix: overwrite files on conflict during server-side move 2023-11-16 17:14:00 +00:00
Oksana Zhykina
41a52f50df quatrix: add partial upload support 2023-11-16 17:14:00 +00:00
Nick Craig-Wood
93f35c915a serve s3: pre-merge tweaks
- Changes
    - Rename `--s3-authkey` to `--auth-key` to get it out of the s3 backend namespace (see the usage sketch after this list)
    - Enable `Content-MD5` integrity checks
    - Remove locking after code audit
- Documentation
    - Factor out documentation into separate file
    - Add Quickstart to docs
    - Add Bugs section to docs
    - Add experimental tag to docs
    - Add rclone provider to s3 backend docs
- Fixes
    - Correct quirks in s3 backend
    - Change fmt.Printlns into fs.Logs
    - Make metadata storage per backend not global
    - Log on startup if anonymous access is enabled
- Coding style fixes
    - rename fs to vfs to save confusion with the rest of rclone code
    - rename db to b for *s3Backend

Fixes #7062
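A hedged quickstart sketch for the renamed flag; the key pair format for
`--auth-key` is assumed to be `accessKey,secretKey`:

```
rclone serve s3 remote:path --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY
```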
2023-11-16 16:59:56 +00:00
Nick Craig-Wood
a2c4f07a57 Add Saw-jan to contributors 2023-11-16 16:59:56 +00:00
Saw-jan
d3dcc61154 serve s3: fixes before merge
- add context to log and fallthrough to error log level
- test: use rclone random lib to generate random strings
- calculate hash from vfs cache if file is uploading
- add server started log with server url
- remove md5 hasher
2023-11-16 16:59:56 +00:00
Nick Craig-Wood
34ef5147aa Add Artur Neumann to contributors 2023-11-16 16:59:56 +00:00
Artur Neumann
aa29742be2 serve s3: fix file name encoding using s3 serve with mc client
Using the mc (minio) client, file name encoding was wrong;
see Mikubill/gofakes3#2 for details
2023-11-16 16:59:56 +00:00
Nick Craig-Wood
ef366b47f1 Add Mikubill to contributors 2023-11-16 16:59:55 +00:00
Mikubill
23abac2a59 serve s3: let rclone act as an S3 compatible server 2023-11-16 16:59:55 +00:00
Nick Craig-Wood
d3ba32c43e s3: add --s3-disable-multipart-uploads flag 2023-11-16 16:59:55 +00:00
Nick Craig-Wood
cdf5a97bb6 bin/update_authors.py: add authors from Co-authored-by: lines too 2023-11-16 16:59:55 +00:00
albertony
e1b0417c28 size: don't show duplicate object count when less than 1k 2023-11-14 16:44:12 +00:00
Nick Craig-Wood
acf1e2df84 lib/file: fix MkdirAll after go1.21.4 stdlib update
In this security-related issue the go1.21.4 stdlib changed the parsing
of volume names on Windows.

https://github.com/golang/go/issues/63713

This had the consequences of breaking the MkdirAll tests which were
looking for specific error messages which changed and using invalid
paths.

In particular under go1.21.3:

    filepath.VolumeName(`\\?\C:`) == `\\?\C:`

But under go1.21.4 it is:

    filepath.VolumeName(`\\?\C:`) == `\\?`

The path `\\?\C:` isn't actually a valid Windows path. I reported this
as an FYI bug upstream - I'm not expecting it to be fixed.

See: https://github.com/golang/go/issues/64101
2023-11-14 09:47:46 +00:00
Nick Craig-Wood
831d1df67f docs: factor large docs into separate .md files to make them easier to maintain.
We then use the go embed command to embed them back into the binary.
2023-11-13 16:27:09 +00:00
Nick Craig-Wood
e67157cf46 Add Tayo-pasedaRJ to contributors 2023-11-13 16:27:09 +00:00
Nick Craig-Wood
ac012618db Add Adithya Kumar to contributors 2023-11-13 16:27:09 +00:00
Nick Craig-Wood
7f09d9c2a0 Add wuxingzhong to contributors 2023-11-13 16:27:09 +00:00
Tayo-pasedaRJ
0548e61910 hdfs: added support for list of namenodes in hdfs remote config
Users can now input a comma separated list of namenodes when writing
config for hdfs remotes.

This is required when you have multiple namenodes in your hdfs cluster
and cannot be certain which namenodes will be in 'standby' or 'active'
states.

This was available before but wasn't documented and didn't use the
correct rclone interfaces.
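A hedged config sketch, assuming the hdfs backend's `namenode` option now
accepts the comma separated list (remote name, hosts and ports are
illustrative):

```
[myhdfs]
type = hdfs
namenode = namenode-1:8020,namenode-2:8020
```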
2023-11-13 15:55:52 +00:00
Adithya Kumar
ad83ff769b webdav: added an rclone vendor to work with rclone serve webdav
Fixes #7160
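A hedged config sketch for pointing the webdav backend at an
`rclone serve webdav` instance (remote name and URL are illustrative):

```
[servedav]
type = webdav
url = http://localhost:8080/
vendor = rclone
```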
2023-11-05 12:37:25 +00:00
albertony
ca14b00b34 docs: show hashsum arguments as optional in usage string 2023-11-03 23:31:00 +01:00
albertony
52d444f4a9 docs: document how to build with version info and icon resources on windows 2023-11-01 12:44:04 +01:00
albertony
4506f35f2e build: refactor version info and icon resource handling on windows
This makes it easier to add resources with any build method, and also when
building librclone.dll.

Goversioninfo is now used as a library, instead of running it as a tool.
2023-11-01 12:44:04 +01:00
wuxingzhong
4ab57eb90b serve dlna: fix crash on graceful exit
Before this change, closing an uninitialised chan would cause a crash.
2023-10-31 16:44:25 +00:00
Nick Craig-Wood
23ab6fa3a0 operations: fix server side copies on partial upload backends after refactor
After the copy refactor:

179f978f75 operations: refactor Copy into methods on a temporary object

There was some confusion in the code about server side copies - should
they or shouldn't they use partials?

This manifested in unit test failures for remotes which supported
server side Copy and PartialUploads. This combination is rare and only
exists in the sftp backend with the --sftp-copy-is-hardlink flag.

This fix makes the choice that backends which set PartialUploads
always use partials even for server side copies.
2023-10-30 16:50:19 +00:00
Nick Craig-Wood
af8ba18580 mount: disable mount for freebsd
The upstream library rclone uses for rclone mount no longer supports
freebsd. Not only is it broken, but it no longer compiles.

This patch disables rclone mount for freebsd.

However all is not lost for freebsd users - compiling rclone with the
`cmount` tag, so `go install -tags cmount` will install a working
`rclone mount` command which uses cgofuse and the libfuse C library
directly.
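A hedged build sketch, assuming a C compiler and the libfuse development
headers are installed:

```
go install -tags cmount github.com/rclone/rclone@latest
```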

Note that the binaries from rclone.org will not have mount support as
we don't have a freebsd build machine in CI and it is very hard to
cross compile cmount.

See: https://github.com/bazil/fuse/issues/280
Fixes #5843
2023-10-29 15:46:41 +00:00
Nick Craig-Wood
0b90dd23c1 build: update all dependencies 2023-10-29 15:46:38 +00:00
Nick Craig-Wood
e64be7652a operations: fix invalid UTF-8 when truncating file names when not using --inplace
Before this change, when not using --inplace, rclone could generate
invalid file names when truncating file names to fit within the
character size limits.

This fixes it by taking care to truncate on UTF-8 character
boundaries.

See: https://forum.rclone.org/t/ssh-fx-failure-when-copying-file-with-nonstandard-characters-to-sftp-remote-with-ntfs-drive/42560/
2023-10-29 14:04:37 +00:00
Nick Craig-Wood
179f978f75 operations: refactor Copy into methods on a temporary object
operations.Copy had become very unwieldy. This refactors it into
methods on a copy object which is created for the duration of the
copy. This makes it much easier to read and reason about.
2023-10-29 14:04:37 +00:00
Nick Craig-Wood
17b7ee1f3a operations: factor Copy into its own file 2023-10-29 14:04:37 +00:00
dependabot[bot]
5c73363b16 build(deps): bump google.golang.org/grpc from 1.56.2 to 1.56.3
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.56.2 to 1.56.3.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.56.2...v1.56.3)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-10-28 18:31:35 +01:00
Nick Craig-Wood
bf21db0ac4 b2: fix multi-thread upload with copyto going to wrong name
See: https://forum.rclone.org/t/errors-and-failure-with-big-file-upload-to-b2/42522/
2023-10-28 15:18:00 +01:00
Nick Craig-Wood
0180301b3f fstests: add integration test for OpenChunkWriter uploading to the wrong name 2023-10-28 15:18:00 +01:00
Nick Craig-Wood
adfb1f7c7d b2: fix error handler to remove confusing DEBUG messages
On a 404 error, b2 returns an empty body which, before this change,
caused the error handler to try to parse an empty string and give the
following DEBUG message:

    Couldn't decode error response: EOF

This is confusing as it is expected in normal operations and isn't an
error.

This change reads the body of an error response first then tries to
decode it only if it isn't empty, which avoids the confusing DEBUG
message.

This also upgrades failure to read the body or failure to decode the
JSON to ERROR messages as now we are certain that we should have
something to read and decode.
2023-10-28 15:18:00 +01:00
Nick Craig-Wood
6092fe2aaa s3: emit a debug message if anonymous credentials are in use
This can indicate the user is expecting `env_auth=true` to be the
default so we say that in the debug message.

See: https://forum.rclone.org/t/rclone-with-amazon-s3-access-point/42411
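For reference, a minimal config sketch that opts into environment/IAM
credentials rather than anonymous access (remote name is illustrative):

```
[mys3]
type = s3
provider = AWS
env_auth = true
```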
2023-10-27 16:00:47 +01:00
Nick Craig-Wood
53868ef4e1 ncdu: fix crash when re-entering changed directory after rescan
ncdu stores the position that it was in for each directory. However
doing a rescan can cause those positions to be out of range if the
number of files decreased in a directory. When re-entering the
directory, this causes an index out of range error.

This fixes the problem by detecting the index out of range and
flushing the saved directory position.

See: https://forum.rclone.org/t/slice-bounds-out-of-range-during-ncdu/42492/
2023-10-24 14:26:57 +01:00
Nick Craig-Wood
e1ad467009 fs: fix docs for Bits 2023-10-23 15:43:55 +01:00
Nick Craig-Wood
12db7b6935 fs: add IsSet convenience method to Bits 2023-10-23 15:43:42 +01:00
Nick Craig-Wood
7434ad8618 docs: remove third party logos from source tree 2023-10-23 15:35:25 +01:00
Nick Craig-Wood
e4ab59bcc7 docs: update Storj image and link 2023-10-23 15:35:25 +01:00
Nick Craig-Wood
9119c6c76f Add alfish2000 to contributors 2023-10-23 15:35:25 +01:00
alfish2000
9d4d294793 union: fix documentation 2023-10-21 10:37:43 +01:00
Nick Craig-Wood
750ed556a5 build: fix new lint errors with golangci-lint v1.55.0 2023-10-20 18:53:30 +01:00
Nick Craig-Wood
5b0d3d060f selfupdate: make sure we don't run tests if selfupdate is set 2023-10-20 18:14:27 +01:00
Nick Craig-Wood
5b0f9dc4e3 local: fix copying from Windows Volume Shadows
For some files the Windows Volume Shadow Service (VSS) advertises the
file size as X in the directory listing but returns a different number
Y on stat-ing the file. If the file is opened and read there are Y
bytes available for reading.

Existing copy tools copy Y bytes rather than X so for consistency
rclone should do the same.

This fixes the problem by stat-ing the file immediately before opening
it. This will also reduce the unnecessary occurrence of "can't copy -
source file is being updated" errors; if the file has finished
changing by the time we come to copy it then we now can copy it
successfully.

See: https://forum.rclone.org/t/consistently-getting-corrupted-on-transfer-sizes-differ-syncing-to-an-smb-share/42218/
2023-10-19 16:38:10 +01:00
Nick Craig-Wood
b0a87d7cf1 Changelog updates from Version 1.64.2 2023-10-19 12:34:34 +01:00
Nick Craig-Wood
37d786c82a selfupdate: fix "invalid hashsum signature" error
This was caused by a change to the upstream library
ProtonMail/go-crypto checking the flags on the keys more strictly.

However the signing key for rclone is very old and does not have those
flags. Adding those flags using `gpg --edit-key` and the `change-usage`
subcommand (remove the signing capability, save and quit, then re-add
it, save and quit) made the key work.
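A hedged sketch of the `gpg --edit-key` dance described above (the key
identifier is illustrative):

```
gpg --edit-key <rclone signing key>
gpg> change-usage   # toggle the Sign capability off
gpg> save
gpg --edit-key <rclone signing key>
gpg> change-usage   # toggle the Sign capability back on
gpg> save
```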

This also adds tests for the verification and adds the selfupdate
tests into the integration test harness as they had been disabled on
CI because they rely on external sources and are sometimes unreliable.

Fixes #7373
2023-10-18 17:55:19 +01:00
Nick Craig-Wood
56fe12c479 build: add the serve docker tests to the integration tester
These had been disabled on CI for being unreliable, so test them in
the integration tests framework which will retry them.
2023-10-18 17:55:19 +01:00
Nick Craig-Wood
9197180610 build: fix docker build running out of space
This removes some unused SDKs from the build machine to free some
space up before building. It also adds some lines to show the free
space.
2023-10-18 17:55:19 +01:00
Nick Craig-Wood
f4a538371d Add Ivan Yanitra to contributors 2023-10-18 17:55:10 +01:00
Nick Craig-Wood
f2ec08cba2 Add Keigo Imai to contributors 2023-10-18 17:55:10 +01:00
Nick Craig-Wood
8f25531b7f Add Gabriel Espinoza to contributors 2023-10-18 17:55:10 +01:00
Ivan Yanitra
0ee6d0b4bf azureblob: add support for cold tier 2023-10-18 17:54:25 +01:00
Keigo Imai
4ac4597afb drive: add a note that --drive-scope accepts comma-separated list of scopes 2023-10-18 17:54:08 +01:00
Joda Stößer
143df6f6d2 docs: change authors email for SimJoSt 2023-10-18 16:31:15 +01:00
Nick Craig-Wood
8264ba987b Changelog updates from Version 1.64.1 2023-10-17 18:37:04 +01:00
Gabriel Espinoza
7a27d9a192 lib/http: export basic go strings functions
makes the following Go strings functions available for use in custom templates: contains, hasPrefix, hasSuffix

added documentation for exported funcs
2023-10-16 19:46:19 +01:00
albertony
195ad98311 docs: update documentation for --fast-list adding info about ListR 2023-10-16 18:11:22 +02:00
Nick Craig-Wood
29baa5888f mount: fix automount not detecting drive is ready
With automount the target mount drive appears twice in /proc/self/mountinfo.

    379 27 0:70 / /mnt/rclone rw,relatime shared:433 - autofs systemd-1 rw,fd=57,...
    566 379 0:90 / /mnt/rclone rw,nosuid,nodev,relatime shared:488 - fuse.rclone remote: rw,...

Before this fix we only looked for the mount once in
/proc/self/mountinfo. It finds the automount line and since this
doesn't have fs type rclone it concludes the mount isn't ready yet.

This patch makes rclone look through all the mounts and if any of them
have fs type rclone it concludes the mount is ready.

See: https://forum.rclone.org/t/systemd-mount-works-but-automount-does-not/42287/
2023-10-16 12:13:20 +01:00
Nick Craig-Wood
c7a2719fac sftp: implement --sftp-copy-is-hardlink to server side copy as hardlink
If the server does not support hardlinks then it falls back to normal
copy.

See: https://forum.rclone.org/t/sftp-remote-server-side-copy/41867
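A hedged usage sketch (remote and paths are illustrative):

```
rclone copyto --sftp-copy-is-hardlink mysftp:dir/file.txt mysftp:dir/link.txt
```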
2023-10-16 12:08:22 +01:00
Nick Craig-Wood
c190b9b14f serve sftp: return not supported error for not supported commands
Before this change, if a hardlink command was issued, rclone would
just ignore it and not return an error.

This changes any unknown operations (including hardlink) to return an
unsupported error.
2023-10-16 12:08:22 +01:00
Nick Craig-Wood
5fa68e9ca5 b2: fix chunked streaming uploads
Streaming uploads are used by rclone rcat and rclone mount
--vfs-cache-mode off.

After the multipart chunker refactor the multipart chunked streaming
upload was accidentally mixing the first and the second parts up which
was causing corrupted uploads.

This was caused by a simple off by one error in the refactoring where
we went from 1 based part number counting to 0 based part number
counting.

Fixing this revealed that the metadata wasn't being re-read for the
copied object either.

This fixes both of those issues and adds an integration test so it
won't happen again.

Fixes #7367
2023-10-13 15:46:36 +01:00
Nick Craig-Wood
b9727cc6ab build: upgrade golang.org/x/net to v0.17.0 to fix HTTP/2 rapid reset
Vulnerability1: GO-2023-2102

HTTP/2 rapid reset can cause excessive work in net/http

More info: https://pkg.go.dev/vuln/GO-2023-2102
2023-10-12 17:44:16 +01:00
Nick Craig-Wood
d8d76ff647 b2: fix server side copies greater than 4GB
After the multipart chunker refactor the multipart chunked server side
copy was accidentally sending one part too many. The last part was 0
length which was rejected by b2.

This was caused by a simple off by one error in the refactoring where
we went from 1 based part number counting to 0 based part number
counting.

Fixing this revealed that the metadata wasn't being re-read for the
copied object either.

This fixes both of those issues and adds an integration test so it
won't happen again.

See: https://forum.rclone.org/t/large-server-side-copy-in-b2-fails-due-to-bad-byte-range/42294
2023-10-12 11:19:56 +01:00
Nick Craig-Wood
5afa838457 cmd: Make --progress output logs in the same format as without
See: https://forum.rclone.org/t/using-progress-change-dates-from-2023-10-05-to-2023-10-05/42173
2023-10-11 11:36:31 +01:00
Nick Craig-Wood
2de084944b operations: fix error message on delete to have file name - fixes #7355 2023-10-11 11:34:11 +01:00
Vitor Gomes
48a8bfa6b3 operations: fix OpenOptions ignored in copy if operation was a multiThreadCopy 2023-10-11 11:19:03 +01:00
Nick Craig-Wood
d3ce795c30 build: fix docker beta build running out of space
This removes some unused SDKs from the build machine to free some
space up before building. It also adds some lines to show the free
space.
2023-10-10 15:59:07 +01:00
Nick Craig-Wood
c04657cd4c Add Volodymyr to contributors 2023-10-10 15:59:07 +01:00
Volodymyr
6255d9dfaa operations: implement --partial-suffix to control extension of temporary file names 2023-10-10 12:27:32 +01:00
Nick Craig-Wood
f56ea2bee2 s3: fix no error being returned when creating a bucket we don't own
Before this change, if you tried to create a bucket that already
existed but was owned by someone else, rclone did not return an error.

This will now return an error on providers that return the
AlreadyOwnedByYou error code, or no error on creation of an
existing bucket owned by you.

This introduces a new provider quirk and this has been set or cleared
for as many providers as can be tested. This can be overridden by the
--s3-use-already-exists flag.

Fixes #7351
2023-10-09 18:15:02 +01:00
Nick Craig-Wood
d6ba60c04d oracleobjectstorage: fix OpenOptions being ignored in uploadMultipart with chunkWriter 2023-10-09 17:13:42 +01:00
Vitor Gomes
37eaa3682a s3: fix OpenOptions being ignored in uploadMultipart with chunkWriter 2023-10-09 17:12:56 +01:00
Nick Craig-Wood
c5f6fc3283 drive: add --drive-show-all-gdocs to allow unexportable gdocs to be server side copied
Before this change, attempting to server side copy a google form would
give this error

    No export formats found for "application/vnd.google-apps.form"

Adding this flag allows the form to be server side copied but not
downloaded.
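A hedged usage sketch (remotes and paths are illustrative):

```
rclone copy --drive-show-all-gdocs drive:forms drive:backup
```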

Fixes #6302
2023-10-09 16:53:03 +01:00
Nick Craig-Wood
4daf755da0 Add Saleh Dindar to contributors 2023-10-09 16:53:03 +01:00
Nick Craig-Wood
eee8ad5146 Add Beyond Meat to contributors 2023-10-09 16:53:03 +01:00
Saleh Dindar
bcb3289dad nfsmount: documentation for new NFS mount feature for macOS 2023-10-06 14:08:20 +01:00
Saleh Dindar
ef2ef8ef84 nfsmount: New mount command to provide mount mechanism on macOS without FUSE
Summary:
In cases where cmount is not available in macOS, we alias nfsmount to the mount command and transparently start the NFS server and mount it to the target dir.

The NFS server is started on localhost on a random port so it is reasonably secure.

Test Plan:
```
go run rclone.go mount --http-url https://beta.rclone.org :http: nfs-test
```

Added mount tests:
```
go test ./cmd/nfsmount
```
2023-10-06 14:08:20 +01:00
Saleh Dindar
c69cf46f06 serve nfs: new serve nfs command
Summary:
Adding a new command to serve any remote over NFS. This is only useful for new macOS versions where FUSE mounts are not available.
 * Added willscott/go-nfs dependency and updated go.mod and go.sum

Test Plan:
```
go run rclone.go serve nfs --http-url https://beta.rclone.org :http:
```

Test that it is serving correctly by mounting the NFS directory.

```
mkdir nfs-test
mount -oport=58654,mountport=58654 localhost: nfs-test
```

Then we can list the mounted directory to see it is working.
```
ls nfs-test
```
2023-10-06 14:08:20 +01:00
Saleh Dindar
25f59b2918 vfs: Add go-billy dependency and make sure vfs.Handle implements billy.File
billy defines a common file system interface that is used in multiple Go packages.
vfs.Handle already implemented billy.File for the most part; only two methods
needed to be added to make it compliant.

An interface check is added as well.

This is a preliminary work for adding serve nfs command.
2023-10-06 14:08:20 +01:00
Saleh Dindar
7801b160f2 vfs: [bugfix] Update dir modification time
A subtle bug where dir modification time is not updated when the dir already exists
in the cache. It is only noticeable when some clients use dir modification time to
invalidate cache.
2023-10-06 14:08:20 +01:00
Saleh Dindar
23f8dea182 vfs: [bugfix] Implement Name() method in WriteFileHandle and ReadFileHandle
The Name() method was originally left out and defaulted to the base
class which always returns empty. This triggered incorrect behavior
in serve nfs where it relied on the Name() of the interface to figure
out what file it was modifying.

This method is copied from RWFileHandle struct.

Added extra assert in the tests.
2023-10-06 14:08:20 +01:00
Beyond Meat
3337fe31c7 vfs: add --vfs-refresh flag to read all the directories on start
Refreshes the directory listing recursively at VFS start time.
2023-10-06 13:11:09 +01:00
Nick Craig-Wood
a752563842 operations: add operations/check to the rc API
Fixes #7015
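A hedged invocation sketch, assuming the parameter names follow the
srcFs/dstFs convention used by other rc commands:

```
rclone rc operations/check srcFs=source: dstFs=dest:
```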
2023-10-04 17:52:57 +01:00
Nick Craig-Wood
c12085b265 operations: close file in TestUploadFile test so it can be deleted on Windows 2023-10-04 17:52:57 +01:00
Nick Craig-Wood
3ab9077820 googlephotos: implement batcher for uploads - fixes #6920 2023-10-03 18:01:34 +01:00
Nick Craig-Wood
b94806a143 dropbox: factor batcher into lib/batcher 2023-10-03 18:01:34 +01:00
Nick Craig-Wood
55d10f4d25 fs: re-implement DumpMode with Bits
This is almost 100% backwards compatible. The only difference is that
in the rc options/get output DumpMode will be output as strings
instead of integers. This is a lot more convenient for the user. They
still accept integer inputs though so the fallout from this should be
minimal.
2023-10-03 15:24:09 +01:00
Nick Craig-Wood
75745fcb21 fs: create fs.Bits for easy creation of parameters from a bitset of choices 2023-10-03 15:24:09 +01:00
Nick Craig-Wood
1cc22da87d vfs: re-implement CacheMode with fs.Enum
This is almost 100% backwards compatible. The only difference is that
in the rc options/get output CacheMode will be output as strings
instead of integers. This is a lot more convenient for the user. They
still accept integer inputs though so the fallout from this should be
minimal.
2023-10-03 15:14:24 +01:00
Nick Craig-Wood
3092f82dcc fs: re-implement CutoffMode, LogLevel, TerminalColorMode with Enum
This is almost 100% backwards compatible. The only difference is that
in the rc options/get output CutoffMode, LogLevel, TerminalColorMode
will be output as strings instead of integers. This is a lot more
convenient for the user. They still accept integer inputs though so
the fallout from this should be minimal.
2023-10-03 15:14:24 +01:00
Nick Craig-Wood
60a6ef914c fs: create fs.Enum for easy creation of parameters from a list of choices 2023-10-03 15:14:24 +01:00
Nick Craig-Wood
3553cc4a5f fs: fix option types printing incorrectly for backend flags
Before this change backend types were printing incorrectly as the name
of the type, not what was defined by the Type() method.

This was not working due to not calling the Type() method. However
this needed to be defined on a non-pointer type due to the way the
options are handled.
2023-10-03 11:23:58 +01:00
Nick Craig-Wood
b8591b230d onedrive: implement ListR method which gives --fast-list support
This implements ListR for onedrive. The API only allows doing this at
the root so it is inefficient to use it anywhere other than the root.

Fixes #7317
2023-10-02 11:12:08 +01:00
Nick Craig-Wood
ecb09badba onedrive: factor API types back into correct file 2023-10-02 10:48:06 +01:00
Nick Craig-Wood
cb43e86d16 b2: reduce default --b2-upload-concurrency to 4 to reduce memory usage
In v1.63 memory usage in the b2 backend was limited to `--transfers` *
`--b2-chunk-size`

However in v1.64 this was raised to `--transfers` * `--b2-chunk-size`
* `--b2-upload-concurrency`.

The default value for this was accidentally set quite high at 16 which
means by default rclone could use up to 6.4GB of memory!

The new default sets a more reasonable (but still high) max memory of 1.6GB.
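For reference, a rough worked example assuming defaults of `--transfers 4`
and a `--b2-chunk-size` of 96M (both assumptions; check your own settings):

```
old default: 4 transfers * 96M chunks * 16 concurrency = 6144 MiB ≈ 6.4 GB
new default: 4 transfers * 96M chunks *  4 concurrency = 1536 MiB ≈ 1.6 GB
```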
2023-10-01 12:30:26 +01:00
Nick Craig-Wood
5c48102ede b2: fix locking window when getting multipart upload URL
Before this change, the lock was held while the upload URL was being
fetched from the server.

This meant that any other threads were blocked from getting upload
URLs unnecessarily.

It also increased the potential for deadlock.
2023-10-01 12:30:26 +01:00
Nick Craig-Wood
96438ff259 pacer: fix b2 deadlock by defaulting max connections to unlimited
Before this change, the maximum number of connections was set to 10.

This means that b2 could deadlock while uploading multipart uploads
due to a lock being held longer than it should have been.
2023-10-01 12:30:26 +01:00
albertony
c1df3ce08c docs: add utime (time of file upload) to standard system metadata 2023-09-29 13:19:57 +02:00
albertony
19ad39fa1c jottacloud: add support for reading and writing metadata
Most useful is the addition of the file created timestamp, but also a timestamp for
when the file was uploaded.

Currently supporting a rather minimalistic set of metadata items, see PR #6359 for
some thoughts about possible extensions.
2023-09-29 13:19:57 +02:00
Nick Craig-Wood
b296f37801 s3: fix slice bounds out of range error when listing
In this commit:

5f938fb9ed s3: fix "Entry doesn't belong in directory" errors when using directory markers

We checked that the remote has the prefix and then changed the remote
before removing the prefix. This sometimes causes:

    panic: runtime error: slice bounds out of range [56:55]

The fix is to do the modification of the remote after removing the
prefix.

See: https://forum.rclone.org/t/cryptcheck-panic-runtime-error-slice-bounds-out-of-range/41977
2023-09-25 11:52:23 +01:00
Nick Craig-Wood
23e44c6065 Add rinsuki to contributors 2023-09-25 11:52:23 +01:00
rinsuki
8fd66daab6 drive: add support of SHA-1 and SHA-256 checksum 2023-09-24 17:38:30 +01:00
Nick Craig-Wood
9e80d48b03 s3: add docs on how to add a new provider 2023-09-23 14:36:48 +01:00
Nick Craig-Wood
eb3082a1eb s3: add Linode provider 2023-09-23 14:34:00 +01:00
Nick Craig-Wood
77ea22ac5b s3: Factor providers list out and auto generate textual version 2023-09-23 14:34:00 +01:00
Nick Craig-Wood
9959712a06 docs: fix backend doc generator to not output duplicate config names
This was always the intention, it was just implemented wrong.

This shortens the s3 docs by 1369, bringing them down to just about
half the size.

Fixes #7325
2023-09-23 12:54:08 +01:00
Nick Craig-Wood
7586fecbca Add Nikita Shoshin to contributors 2023-09-23 12:54:08 +01:00
Nikita Shoshin
94cdb00eb6 rcserver: set Last-Modified header for files served by --rc-serve 2023-09-23 12:20:29 +01:00
Dimitri Papadopoulos Orfanos
3d473eb54e docs: fix typos found by codespell in docs and code comments 2023-09-23 12:20:01 +01:00
Nick Craig-Wood
50b4a2398e onedrive: fix the configurator to allow /teams/ID in the config
See: https://forum.rclone.org/t/sharepoint-to-google/41548/
2023-09-22 15:54:22 +01:00
Nick Craig-Wood
e6b718c938 build: add btesth target to output beta log in HTML for email pasting 2023-09-21 16:15:48 +01:00
Nick Craig-Wood
9370dbcc47 lsjson: make sure we set the global metadata flag too 2023-09-21 16:15:38 +01:00
Nick Craig-Wood
8c1e9a2905 rc: always report an error as JSON
Before this change, the rclone rc command wouldn't actually report the
error as a JSON blob which is inconsistent with what the HTTP API does.

This change makes sure we always report a JSON error, making a
synthetic one if necessary.

See: https://forum.rclone.org/t/when-using-rclone-rc-commands-somehow-return-errors-as-parsable-json/41855
Co-authored-by: Fawzib Rojas
2023-09-20 21:57:40 +01:00
Nick Craig-Wood
6072d314e1 b2: fix multipart upload: corrupted on transfer: sizes differ XXX vs 0
Before this change the b2 backend wasn't writing the metadata to the
object properly after a multipart upload.

The symptom of this was that sometimes it would give the error:

    corrupted on transfer: sizes differ XXX vs 0

This was fixed by returning the metadata in the chunk writer and setting it in Update.

See: https://forum.rclone.org/t/multipart-upload-to-b2-sometimes-failing-with-corrupted-on-transfer-sizes-differ/41829
2023-09-18 20:41:31 +01:00
Nick Craig-Wood
9277ca1e54 b2: implement --b2-lifecycle to control lifecycle when creating buckets 2023-09-16 17:01:43 +01:00
Nick Craig-Wood
d6722607cb b2: implement "rclone backend lifecycle" to read and set bucket lifecycles 2023-09-16 16:44:28 +01:00
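A hedged usage sketch of reading a bucket's lifecycle rules; per the
commit, options can also be supplied to set them (option names are
assumed to follow the B2 lifecycle rule fields):

```
rclone backend lifecycle b2:bucket
```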
Nick Craig-Wood
4ef30db209 b2: fix listing all buckets when not needed
Before this change the b2 backend listed all the buckets to turn a
single bucket name into an ID.

However on July 26, 2018 a parameter was added to the list buckets API
which makes listing all the buckets unnecessary.

This code sets the bucketName parameter so that only the results for
the desired bucket are returned.
2023-09-16 16:04:50 +01:00
Nick Craig-Wood
55c12c9a2d azureblob: fix "fatal error: concurrent map writes"
Before this change, the metadata map could be accessed from multiple
goroutines at once, sometimes causing this error.

This fix adds a global mutex for adjusting the metadata map to make
all accesses safe.

See: https://forum.rclone.org/t/azure-blob-storage-with-vfs-cache-concurrent-map-writes-exception/41686
2023-09-16 11:33:03 +01:00
dependabot[bot]
4349dae784 build(deps): bump docker/setup-qemu-action from 2 to 3
Bumps [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) from 2 to 3.
- [Release notes](https://github.com/docker/setup-qemu-action/releases)
- [Commits](https://github.com/docker/setup-qemu-action/compare/v2...v3)

---
updated-dependencies:
- dependency-name: docker/setup-qemu-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-09-15 10:25:14 +01:00
David Sze
3e63f2c249 box: add more logging for polling 2023-09-15 10:24:43 +01:00
David Sze
5118ab9609 box: filter more EventIDs when polling 2023-09-15 10:24:43 +01:00
dependabot[bot]
7ea118aeae build(deps): bump docker/setup-buildx-action from 2 to 3
Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 2 to 3.
- [Release notes](https://github.com/docker/setup-buildx-action/releases)
- [Commits](https://github.com/docker/setup-buildx-action/compare/v2...v3)

---
updated-dependencies:
- dependency-name: docker/setup-buildx-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-09-15 09:15:26 +01:00
Kaloyan Raev
af260921c0 storj: update storj.io/uplink to v1.12.0
The improved upload logic is active by default in uplink v1.12.0, so the
`testuplink.WithConcurrentSegmentUploadsDefaultConfig(ctx)` is not
required anymore.

See https://github.com/rclone/rclone/pull/7198
2023-09-14 14:01:35 +01:00
Nick Craig-Wood
62db2bb329 docs: add notes on how to update the website between releases 2023-09-13 17:02:29 +01:00
Nick Craig-Wood
8d58adbd54 docs: remove minio sponsor box for the moment 2023-09-13 17:02:29 +01:00
Nick Craig-Wood
b4d251081f docs: update Storj partner link 2023-09-13 17:02:29 +01:00
Nick Craig-Wood
a382bf8f03 Add Herby Gillot to contributors 2023-09-13 17:02:29 +01:00
Nick Craig-Wood
28fc43fb11 Add Pat Patterson to contributors 2023-09-13 17:02:29 +01:00
Herby Gillot
83be1501db docs: add MacPorts install info
https://ports.macports.org/port/rclone/
2023-09-13 16:06:15 +01:00
dependabot[bot]
be156133c5 build(deps): bump docker/metadata-action from 4 to 5
Bumps [docker/metadata-action](https://github.com/docker/metadata-action) from 4 to 5.
- [Release notes](https://github.com/docker/metadata-action/releases)
- [Upgrade guide](https://github.com/docker/metadata-action/blob/master/UPGRADE.md)
- [Commits](https://github.com/docker/metadata-action/compare/v4...v5)

---
updated-dependencies:
- dependency-name: docker/metadata-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-09-13 15:56:38 +01:00
dependabot[bot]
d494db78d9 build(deps): bump docker/login-action from 2 to 3
Bumps [docker/login-action](https://github.com/docker/login-action) from 2 to 3.
- [Release notes](https://github.com/docker/login-action/releases)
- [Commits](https://github.com/docker/login-action/compare/v2...v3)

---
updated-dependencies:
- dependency-name: docker/login-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-09-13 15:55:58 +01:00
dependabot[bot]
1b9eb74204 build(deps): bump docker/build-push-action from 4 to 5
Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 4 to 5.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](https://github.com/docker/build-push-action/compare/v4...v5)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-09-13 15:55:23 +01:00
Manoj Ghosh
cda09704a8 fix overview of oracle object storage as it supports multithreaded 2023-09-13 12:14:07 +01:00
Pat Patterson
71f2883562 operations: ensure concurrency is no greater than the number of chunks - fixes #7299 2023-09-13 12:12:27 +01:00
Nick Craig-Wood
d29d263329 docs: fix minimum Go version and update to 1.18
Fixes #7288
2023-09-12 11:24:17 +01:00
Nick Craig-Wood
20126de1aa Start v1.65.0-DEV development 2023-09-12 11:24:17 +01:00
Nick Craig-Wood
77f7bb08af Version v1.64.0 2023-09-11 15:59:44 +01:00
Nick Craig-Wood
a5a61f4874 protondrive: make cached keys rclone style and not show with rclone config redacted 2023-09-11 15:57:08 +01:00
Nick Craig-Wood
e8879f3e77 docs: document release signing and verification
Fixes #7257
2023-09-11 12:28:23 +01:00
Nick Craig-Wood
2a6675cffd docs: fix typo in rc docs - fixes #7287 2023-09-11 09:34:22 +01:00
Nick Craig-Wood
fa4d171f62 protondrive: complete docs with all references to Proton Drive 2023-09-10 11:57:39 +01:00
Nick Craig-Wood
d4d530bd8e drive: add --drive-fast-list-bug-fix to control ListR bug workaround
See: https://forum.rclone.org/t/how-to-list-empty-directories-recursively/40995/12
2023-09-09 17:46:03 +01:00
Nick Craig-Wood
f4b011e4e4 s3: add rclone backend restore-status command
This command shows the restore status of objects being retrieved from GLACIER.

See: https://forum.rclone.org/t/aws-s3-glacier-monitor-restore-status-command-for-glacier-restoring-process/41373/7
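A hedged usage sketch (bucket and path are illustrative):

```
rclone backend restore-status s3:bucket/path
```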
2023-09-09 17:44:36 +01:00
Nick Craig-Wood
d80890bf32 Add Drew Stinnett to contributors 2023-09-09 17:44:36 +01:00
Nick Craig-Wood
39392d70dd Add David Pedersen to contributors 2023-09-09 17:44:36 +01:00
Drew Stinnett
643386f026 rc: Add operations/settier to API 2023-09-09 17:41:02 +01:00
Chun-Hung Tseng
ed755bf04f protondrive: implement two-password mode (#7279) 2023-09-08 22:54:46 +02:00
David Pedersen
071c3f28e5 vfs: Update parent directory modtimes on vfs actions
This isn't written back to the storage so might change on remount but
makes the VFS just a little more POSIX compatible.
2023-09-08 17:19:52 +01:00
Nick Craig-Wood
7453b7d5f3 hdfs: fix retry "replication in progress" errors when uploading
Before this change uploaded files could return the error "replication
in progress".

This error is harmless though and means the Close should be retried
which is what this patch does.
2023-09-08 15:35:50 +01:00
Nick Craig-Wood
c9350149d8 hdfs: fix uploading to the wrong object on Update with overriden remote name
In the commit below we discovered a problem with objects being uploaded
to the incorrect object name, and it added an integration test for the
problem.

65b2e378e0 drive: fix incorrect remote after Update on object

This test was tripped by the hdfs backend and this patch fixes the
problem.
2023-09-08 15:35:50 +01:00
Nick Craig-Wood
08789a5815 test_all: remove filefabric from integration tests
The filefabric test server doesn't seem to be working at all so remove
it from the integration tests pending revitalization.
2023-09-08 15:35:50 +01:00
Nick Craig-Wood
4037af9c1a Add Oksana and Volodymyr Kit to contributors 2023-09-08 15:35:50 +01:00
Oksana
628ff8e524 quatrix: add backend to support Quatrix
Co-authored-by: Volodymyr Kit <v.kit@maytech.net>
2023-09-08 14:31:29 +01:00
Chun-Hung Tseng
578c75cb1e protondrive: fix signature verification logic by accounting for legacy signing scheme (#7278) 2023-09-08 16:00:34 +08:00
Nick Craig-Wood
63ab250817 vfs: add --vfs-cache-min-free-space to control minimum free space on the disk containing the cache
See: https://forum.rclone.org/t/rclone-fails-to-control-disk-usage-and-its-filling-the-disk-to-100/41494/
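A hedged usage sketch (mount point and size are illustrative):

```
rclone mount remote: /mnt/remote --vfs-cache-mode full --vfs-cache-min-free-space 5G
```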
2023-09-07 15:57:45 +01:00
Nick Craig-Wood
39f910a65d rc: add core/du to measure local disk usage 2023-09-07 15:57:45 +01:00
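A hedged invocation sketch, assuming `core/du` takes an optional `dir`
parameter naming the directory to measure:

```
rclone rc core/du dir=/path/to/check
```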
Nick Craig-Wood
0fb36562dd Add lib/diskusage to measure used/free on disks 2023-09-07 15:57:45 +01:00
Nick Craig-Wood
8c25a15a40 Add zjx20 to contributors 2023-09-07 15:57:45 +01:00
zjx20
f5ee16e201 local: rmdir return an error if the path is not a dir 2023-09-07 14:30:08 +01:00
Nick Craig-Wood
2bcbed30bd s3: implement backend set command to update running config 2023-09-07 12:26:48 +01:00
Chun-Hung Tseng
5026a9171d protondrive: improves 2fa and draft error messages (#7280) 2023-09-07 01:50:28 +08:00
Nick Craig-Wood
b750c50bfd zoho: remove Range requests workarounds to fix integration tests
Zoho are now responding to Range requests properly. The remnants of
our old workaround were breaking the integration tests so this removes
them.
2023-09-05 18:21:15 +01:00
Nick Craig-Wood
535acd0483 fstests: fix PublicLink failing on storj
Storj requires a minimum duration of 1 minute for the link expiry so
increase what we are asking for from 1 minute to 2 minutes.
2023-09-05 18:01:37 +01:00
Nick Craig-Wood
db37b3ef9e opendrive: fix List on a just deleted and remade directory
Sometimes opendrive reports "403 Folder is already deleted" on
directories which should exist.

This might be a bug in opendrive or in rclone however we work-around
here sufficient to get the tests passing.
2023-09-05 17:59:03 +01:00
Nick Craig-Wood
257607ab3d operations: fix TestCopyFileMaxTransfer test to not be quite so fussy 2023-09-05 17:42:17 +01:00
Nick Craig-Wood
3ea1c5c4d2 compress: fix ChangeNotify
ChangeNotify has been broken on the compress backend for a long time!

Before this change it was wrapping the file names received rather than
unwrapping them to discover the original names.

It is likely ChangeNotify was working adequately though for users as
the VFS just uses the directories rather than the file names.
2023-09-05 17:22:36 +01:00
Nick Craig-Wood
bd23ea028e azureblob: fix purging with directory markers 2023-09-05 17:07:44 +01:00
Nick Craig-Wood
c58d4fe939 test_all: ignore Rmdirs test failure on b2 as it fails because of versions 2023-09-05 15:47:14 +01:00
nielash
ddc7059a73 Add @nielash as bisync maintainer 2023-09-05 04:05:39 -04:00
dependabot[bot]
2677c43f26 build(deps): bump actions/checkout from 3 to 4
Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v3...v4)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-09-05 08:55:32 +01:00
nielash
48ab67f090 bisync: fix dryRun rc parameter being ignored
Before this change, bisync ignored the dryRun parameter (only when specified
via the rc).

This change fixes the issue, so that the dryRun rc parameter is equivalent to
the --dry-run flag.
2023-09-05 08:53:58 +01:00
nielash
089df7d977 bisync: add rc parameters for new flags
Added rc support for the flags recently introduced in #6971.

createEmptySrcDirs
ignoreListingChecksum
resilient
2023-09-05 08:53:58 +01:00
Nick Craig-Wood
4fbe0652c9 compress: fix integration tests by adding missing OpenChunkWriter exclude 2023-09-04 19:26:14 +01:00
Nick Craig-Wood
47665dad07 cache: fix integration tests by adding missing OpenChunkWriter exclude 2023-09-04 19:26:14 +01:00
eNV25
ad724463a5 cmd: refactor and use sysdnotify in more commands
Fixes #5117
2023-09-04 16:32:04 +01:00
Nick Craig-Wood
6afd7088d3 box: add --box-impersonate to impersonate a user ID - fixes #7267 2023-09-04 12:09:54 +01:00
Nick Craig-Wood
b33140ddeb union: add :writeback to act as a simple cache
This adds a :writeback tag to upstreams. If set on a single upstream
then it writes back objects not found into that upstream.

Fixes #6934
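A hedged config sketch, assuming the tag is appended to an upstream in the
same way as the existing :ro and :nc tags (remote names are illustrative):

```
[cached]
type = union
upstreams = fastcache:data:writeback slowremote:data
```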
2023-09-04 12:03:26 +01:00
Nick Craig-Wood
b1c0ae5e7d azureblob: fix creation of directory markers
This also fixes the integration tests which is why we didn't notice this before!
2023-09-03 18:09:31 +01:00
Nick Craig-Wood
40bcc7a90b fstest: fix sftp ssh integration tests
This adds a private and public key to the SFTP SSH test so that it
works when it doesn't have access to my ssh agent!
2023-09-03 15:23:12 +01:00
Nick Craig-Wood
be17f1523a b2: fix ChunkWriter size return 2023-09-03 13:53:11 +01:00
Nick Craig-Wood
bb58040d9c s3: fix multipart streaming uploads of 0 length files 2023-09-03 12:37:20 +01:00
Nick Craig-Wood
2db0e23584 backends: change OpenChunkWriter interface to allow backend concurrency override
Before this change the concurrency used for an upload was rather
inconsistent.

- if size below `--backend-upload-cutoff` (default 200M) do single part upload.

- if size below `--multi-thread-cutoff` (default 256M) or using streaming
  uploads (eg `rclone rcat`) do multipart upload using
  `--backend-upload-concurrency` to set the concurrency used by the uploader.

- otherwise do multipart upload using `--multi-thread-streams` to set the
  concurrency.

This change makes the default for the concurrency used be the
`--backend-upload-concurrency`. If `--multi-thread-streams` is set and larger
than the `--backend-upload-concurrency` then that will be used instead.

This means that if the user sets `--backend-upload-concurrency` then it will be
obeyed for all multipart/multi-thread transfers and the user can override them
all with `--multi-thread-streams`.

See: #7056
2023-09-03 11:47:05 +01:00
Nick Craig-Wood
a7337b0a95 Add Alishan Ladhani to contributors 2023-09-03 11:47:05 +01:00
Alishan Ladhani
7821cb884d b2: fix rclone link when object path contains special characters
Before this change, b2 would return an error when opening a link
generated by `rclone link`. The following error occurs when the object
path contains an ampersand that is not percent encoded:

{
  "code": "bad_request",
  "message": "Bad character in percent-encoded string: 38 (0x26)",
  "status": 400
}
2023-09-02 18:31:14 +01:00
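An illustrative case (bucket and file name invented) that now produces a working link:

```
# Before the fix the returned link failed with "Bad character in percent-encoded string";
# the ampersand in the path is now percent-encoded correctly.
rclone link "b2:my-bucket/reports/profit & loss.xlsx"
```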
Nick Craig-Wood
85c29e3629 serve dlna: fix MIME type if backend can't identify it
If the server returns the MIME type as application/octet-stream we
assume it doesn't really know what the MIME type is. In this case the
patch tries to match the MIME type from the file extension instead.

This enables the use of servers (like OneDrive for Business) which
don't allow the setting of MIME types on upload and have a poor
selection of mime types.

Fixes #7259
2023-09-01 18:09:44 +01:00
Nick Craig-Wood
b7ec75aab6 docs: add Storj as a sponsor 2023-09-01 18:09:44 +01:00
Nick Craig-Wood
38309f2df2 Add Bjørn Smith to contributors 2023-09-01 18:09:44 +01:00
NoLooseEnds
7487d34c33 jotta: added Telia Sky whitelabel (Norway)
Duplicated the Telia Cloud (Sweden) provider, changed the URLs and added
teliase and teliano (instead of just telia) to differentiate them.

See: #5153 #5016
2023-09-01 14:55:32 +01:00
kapitainsky
e45cb4fc75 docs: single character remote names in Windows
Clarify how single character remote names are interpreted in Windows (as drive letters)

See: https://forum.rclone.org/t/issue-with-single-character-configuration-on-windows-with-rclone/
2023-09-01 11:00:14 +01:00
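A short illustration of the behaviour being documented (remote name is an example):

```
# On Windows a single character before the colon is parsed as a drive letter,
# so this refers to the local C: drive, not a remote named "c":
rclone ls c:
# Remote names therefore need two or more characters to be treated as remotes:
rclone ls cloud:
```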
Bjørn Smith
21008b4cd5 docs: sftp: add note regarding format of server_command
Elaborate exactly how server_command should be used in the configuration file
2023-09-01 10:57:42 +01:00
Nick Craig-Wood
cffe85e6c5 fshttp: fix --bind 0.0.0.0 allowing IPv6 and --bind ::0 allowing IPv4
Due to a bug/misfeature in the go standard library as described here:
https://github.com/golang/go/issues/48723 the go standard library
binds to both IPv4 and IPv6 when passed 0.0.0.0 or ::0.

This patch detects the bind address and forces the correct IP
protocol.

Fixes #6124
Fixes #6244
See: https://forum.rclone.org/t/issues-with-bind-0-0-0-0-and-onedrive-getting-etag-mismatch-when-using-ipv6/41379/
2023-09-01 10:47:39 +01:00
Nick Craig-Wood
d12a92eac9 box: fix unhelpful decoding of error messages into decimal numbers
Before this change the box backend could make errors like

    Error "not_found" (404): On-Behalf-Of User not found ([123 34 105 110
    118 97 108 105 100 95 117 115 101 114 95 105 100 34 58 123 34 105 100
    34 58 34 48 48 48 48 48 48 48 48 48 48 48 34 125 125])

This fixes it to produce this instead

    Error "not_found" (404): On-Behalf-Of User not found ({"invalid_user_id":{"id":"00000000000"}})
2023-08-31 23:03:27 +01:00
eNV25
11eeaaf792 cmd/ncdu: fix add keybinding to rescan filesystem 2023-08-30 14:29:46 +01:00
David Sze
a603efeaf4 box: add polling support 2023-08-30 09:25:00 +01:00
eNV25
0bd0a992a4 cmd/ncdu: add keybinding to rescan filesystem
Fixes #7255
2023-08-30 09:05:58 +01:00
Justin Hellings
82c8d78a44 docs: may not -> might not, to remove ambiguity
"may not" can be interpreted as "is not allowed".
Replaced with "might not" in both cases to remove ambiguity.
2023-08-29 15:10:50 +01:00
Nick Craig-Wood
a83fec756b build: fix lint errors when re-enabling revive var-naming 2023-08-29 13:03:49 +01:00
Nick Craig-Wood
e953598987 build: fix lint errors when re-enabling revive exported & package-comments 2023-08-29 13:03:13 +01:00
Nick Craig-Wood
feaa20d885 build: re-enable revive linters
In this commit:

75dfdbf211 ci: revert revive settings back to fix lint

We accidentally disabled all the revive linters. Unfortunately setting
the rules clears the default set of rules so it is necessary to
mention all rules that we need.
2023-08-29 13:01:15 +01:00
Nick Craig-Wood
967fc6d7f4 lib/multipart: fix accounting for multipart transfers
This change makes sure the accounting is done when bytes are taken out
of the buffer rather than when they are put in.

See: https://forum.rclone.org/t/improve-transfer-stats-calculation-for-multipart-uploads/41172
2023-08-27 23:10:58 +01:00
Nick Craig-Wood
b95bda1e92 s3: fix purging of root directory with --s3-directory-markers - fixes #7247 2023-08-25 17:39:16 +01:00
Nick Craig-Wood
9c14562850 fstests: add backend integration test for purging root directory #7247 2023-08-25 17:39:07 +01:00
Nick Craig-Wood
f992742404 s3: fix accounting for multipart uploads 2023-08-25 16:31:31 +01:00
Nick Craig-Wood
f2467d07aa oracleobjectstorage: fix accounting for multipart uploads 2023-08-25 16:31:31 +01:00
Nick Craig-Wood
d69cdb79f7 b2: fix accounting for multipart uploads 2023-08-25 16:31:31 +01:00
Nick Craig-Wood
df5d92d709 operations: fix terminology in multi-thread copy 2023-08-25 16:31:31 +01:00
Nick Craig-Wood
1b5b36523b operations: fix accounting for multi-thread transfers
This stops the accounting moving in large chunks and gets it to move
as the data is read out of the buffer.
2023-08-24 17:51:36 +01:00
Nick Craig-Wood
2f424ceecf operations: don't buffer when a backend implements OpenWriterAt
In this case we don't seek on errors so no need for seeking.
2023-08-24 17:51:36 +01:00
Nick Craig-Wood
bc986b44b2 lib/pool: add DelayAccounting() to fix accounting when reading hashes 2023-08-24 16:42:09 +01:00
Nick Craig-Wood
f4b1a51af6 lib/pool: add SetAccounting to RW 2023-08-24 15:28:40 +01:00
Manoj Ghosh
25703ad20e oracleobjectstorage: implement OpenChunkWriter and multi-thread uploads #7056 2023-08-24 12:39:28 +01:00
Nick Craig-Wood
ab803d1278 b2: implement OpenChunkWriter and multi-thread uploads #7056
This implements the OpenChunkWriter interface for b2 which
enables multi-thread uploads.

This makes the memory controls of the b2 backend inoperative; they are
replaced with the global ones.

    --b2-memory-pool-flush-time
    --b2-memory-pool-use-mmap

By using the buffered reader this fixes excessive memory use when
uploading large files as it will share memory pages between all
readers.
2023-08-24 12:39:27 +01:00
Nick Craig-Wood
0427177857 azureblob: implement OpenChunkWriter and multi-thread uploads #7056
This implements the OpenChunkWriter interface for azureblob which
enables multi-thread uploads.

This makes the memory controls of the azureblob backend inoperative; they
are replaced with the global ones.

    --azureblob-memory-pool-flush-time
    --azureblob-memory-pool-use-mmap

By using the buffered reader this fixes excessive memory use when
uploading large files as it will share memory pages between all
readers.
2023-08-24 12:39:27 +01:00
Nick Craig-Wood
3dfcfc2caa operations: document multi-thread copy and tweak defaults 2023-08-24 12:39:27 +01:00
Nick Craig-Wood
d4cff1ae19 operations: add abort on exit to multithread copy 2023-08-24 12:39:27 +01:00
Nick Craig-Wood
f5753369e4 operations: multipart: don't buffer transfers to local disk #7056 2023-08-24 12:39:27 +01:00
Nick Craig-Wood
4c76fac594 s3: factor generic multipart upload into lib/multipart #7056
This makes the memory controls of the s3 backend inoperative; they are
replaced with the global ones.

    --s3-memory-pool-flush-time
    --s3-memory-pool-use-mmap

By using the buffered reader this fixes excessive memory use when
uploading large files as it will share memory pages between all
readers.

Fixes #7141
2023-08-24 12:39:27 +01:00
Nick Craig-Wood
0d0bcdac31 fs: add context.Ctx to ChunkWriter methods
WriteChunk in particular needs a different context from that which
OpenChunkWriter was used with so add it to all the methods.
2023-08-24 12:39:27 +01:00
Nick Craig-Wood
f3bd02f0ef operations: fix and tidy multithread code
- fix docs and error messages for multithread
- use sync/errgroup built in concurrency limiting
- re-arrange multithread code
- don't continue multi-thread uploads if one part fails
2023-08-24 12:39:27 +01:00
Nick Craig-Wood
e6fde67491 s3: fix retry logic, logging and error reporting for chunk upload
- move retries into the correct place in the lowest level functions
- fix logging and error reporting
2023-08-24 12:39:27 +01:00
Nick Craig-Wood
b4e3332e02 fs: introduces aliases for OpenWriterAtFn and OpenChunkWriterFn 2023-08-24 12:39:27 +01:00
Nick Craig-Wood
0dea83a4aa pool: add page backed reader/writer for multi thread uploads 2023-08-24 12:39:27 +01:00
Nick Craig-Wood
e8f3f98aa0 lib/readers: add NoSeeker to adapt io.Reader to io.ReadSeeker 2023-08-24 12:39:27 +01:00
Nick Craig-Wood
d61328e459 serve ftp: fix race condition when using the auth proxy
In this commit we introduced a race condition when using the auth
proxy.

94a320f23c serve ftp: update to goftp.io/server v2.0.1

This was due to the re-organisation of the upstream library which made
the driver a singleton rather than per session.

This means that when using the auth proxy we need to keep track of
which VFS to use based on which FTP user is connected.

This also adjusts the locking so that the methods will run
concurrently.
2023-08-23 15:11:47 +01:00
r-ricci
9844704567 docs: remove contributor's old email 2023-08-23 12:31:48 +01:00
Nick Craig-Wood
94a320f23c serve ftp: update to goftp.io/server v2.0.1 - fixes #7237 2023-08-22 17:24:05 +01:00
Nick Craig-Wood
7fc573db27 serve sftp: fix hash calculations with --vfs-cache-mode full
Before this change uploading files with rclone to:

    rclone serve sftp --vfs-cache-mode full

Would return the error:

    command "md5sum XXX" failed with error: unexpected non file

This patch detects that the file is still in the VFS cache and reads
the MD5SUM from there rather from the remote.

Fixes #7241
2023-08-22 13:18:36 +01:00
Nick Craig-Wood
af95616122 Add Roberto Ricci to contributors 2023-08-22 13:18:29 +01:00
Roberto Ricci
72f9f1e9c0 vfs: make sure struct field is aligned for atomic access 2023-08-22 12:52:13 +01:00
Roberto Ricci
91b8152321 vfs: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
552b6c47ff lib: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
01a155fb00 fs: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
50d0597d56 cmount: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
123a030441 smb: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
28ceb323ee sftp: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
c624dd5c3a seafile: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
a56c11753a local: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
4341d472aa filefabric: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
b6e7148daf box: use atomic types 2023-08-22 12:52:13 +01:00
Roberto Ricci
45458f2cdb union: use atomic types 2023-08-22 12:52:13 +01:00
Nick Craig-Wood
de147b6e54 sftp: fix --sftp-ssh looking for ssh agent - fixes #7235
Before this change if pass was empty this would attempt to connect to
the ssh agent even when using --sftp-ssh.

This patch prevents that.
2023-08-21 17:43:02 +01:00
Nick Craig-Wood
11de137660 sftp: fix spurious warning when using --sftp-ssh
When using --sftp-ssh we were warning about user/host/port even when
they were at their defaults.

See: #7235
2023-08-21 17:43:02 +01:00
Nick Craig-Wood
156c372cd7 sync: fix lockup with --cutoff-mode=soft and --max-duration
Before this change, when using --cutoff-mode=soft and --max-duration
rclone deadlocked when the cutoff limit was reached.

This was because the sync object's Pipe became full and nothing was
emptying it because the cutoff was reached.

This changes the context for putting items into the pipe to be the one
that gets cancelled when the cutoff is reached.

See: https://forum.rclone.org/t/sync-command-hanging-using-cutoff-mode-soft-with-max-duration-time-flags/40866
2023-08-18 17:33:54 +01:00
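The combination that previously hung, as a sketch (paths and duration are placeholders):

```
# With the fix the sync stops cleanly once the 30 minute limit is reached
# instead of deadlocking when the pipe fills up.
rclone sync source:dir dest:dir --max-duration 30m --cutoff-mode soft
```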
Nick Craig-Wood
c979cde002 ftp: fix 425 "TLS session of data connection not resumed" errors
As an extra security feature some FTP servers (eg FileZilla) require
that the data connection re-use the same TLS connection as the control
connection. This is a good thing for security.

The message "TLS session of data connection not resumed" means that it
was not done.

The problem turned out to be that rclone was re-using the TLS session
cache between concurrent connections so the resumed TLS data
connection could come from any of the control connections.

This patch makes each TLS connection have its own session cache which
should fix the problem.

This also reverts the ftp library to the upstream version which now
contains all of our patches.

Fixes #7234
2023-08-18 14:44:13 +01:00
Nick Craig-Wood
03aab1a123 rmdirs: remove directories concurrently controlled by --checkers
See: https://forum.rclone.org/t/how-to-list-empty-directories-recursively/40995
2023-08-18 12:05:15 +01:00
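Sketch of the usage this enables (remote path invented):

```
# Empty directories are now removed concurrently; --checkers (default 8) sets the parallelism.
rclone rmdirs remote:path --checkers 16 --leave-root
```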
Nick Craig-Wood
dc803b572c Add hideo aoyama to contributors 2023-08-18 12:05:15 +01:00
Nick Craig-Wood
4d19042a61 Add Jacob Hands to contributors 2023-08-18 12:05:15 +01:00
hideo aoyama
923989d1d7 build: add snap installation
I ( @boukendesho ) have volunteered to maintain the snap package so
this adds it back into the installation instructions.

It will set a `snap` tag visible in `rclone version` so we know where
it came from for support queries.
2023-08-18 11:57:25 +01:00
sitiom
cf65e36cf3 ci: change Winget Releaser job to ubuntu-latest 2023-08-17 11:36:28 +01:00
Jacob Hands
cf5457c2cd fs: Fix transferTime not being set in JSON logs
This was unintentionally broken in 04aa696
2023-08-17 11:19:35 +01:00
Jacob Hands
ea4aa696a5 fs: Don't stop calculating average transfer speed until the operation is complete
Currently, the average transfer speed will stop calculating 1 minute
after the last queued transfer completes. This causes the average to
stop calculating when checking is slow and the transfer queue becomes
empty.

This change will require all checks to complete before stopping the
average speed calculation.
2023-08-16 21:43:24 +01:00
Nick Craig-Wood
34195fd3e8 sync: fix erroneous test in TestSyncOverlapWithFilter
In this commit:

432d5d1e20 operations: fix overlapping check on case insensitive file systems

We introduced a test that makes no sense. This happens to pass without --fast-list and fail with it.

This removes the test.
2023-08-13 11:42:31 +01:00
Nick Craig-Wood
40b8167ab4 Add Vitor Gomes to contributors 2023-08-13 11:42:22 +01:00
Nick Craig-Wood
e365f237f5 Add nielash to contributors 2023-08-13 11:41:24 +01:00
Nick Craig-Wood
7d449572bd Add alexia to contributors 2023-08-13 11:41:24 +01:00
Vitor Gomes
181fecaec3 multithread: refactor multithread operation to use OpenChunkWriter if available #7056
If the feature OpenChunkWriter is not available, multithread tries to create an adapter from OpenWriterAt to OpenChunkWriter.
2023-08-12 17:55:01 +01:00
Vitor Gomes
7701d1d33d config: add "multi-thread-chunk-size" flag #7056 2023-08-12 17:55:01 +01:00
Vitor Gomes
6dd736fbdc s3: refactor MultipartUpload to use OpenChunkWriter and ChunkWriter #7056 2023-08-12 17:55:01 +01:00
Vitor Gomes
f36ca0cd25 features: add new interfaces OpenChunkWriter and ChunkWriter #7056 2023-08-12 17:55:01 +01:00
nielash
9b3b1c7067 bisync: typo corrections & other doc improvements 2023-08-12 17:24:21 +01:00
nielash
0dd0d6a13e bisync: Add support for --create-empty-src-dirs - Fixes #6109
Sync creation and deletion of empty directories.
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=3.%20Bisync%20should%20create/delete%20empty%20directories%20as%20sync%20does%2C%20when%20%2D%2Dcreate%2Dempty%2Dsrc%2Ddirs%20is%20passed

Also fixed an issue causing --resync to erroneously delete empty folders and duplicate files unique to Path2
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=2.%20%2D%2Dresync%20deletes%20data%2C%20contrary%20to%20docs
2023-08-12 17:24:21 +01:00
nielash
e5bde42303 bisync: Add experimental --resilient mode to allow recovery from self-correctable errors
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=2.%20Bisync%20should%20be%20more%20resilient%20to%20self%2Dcorrectable%20errors
2023-08-12 17:24:21 +01:00
nielash
f01a50eb47 bisync: Add new --ignore-listing-checksum flag to distinguish from --ignore-checksum
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=6.%20%2D%2Dignore%2Dchecksum%20should%20be%20split%20into%20two%20flags%20for%20separate%20purposes
2023-08-12 17:24:21 +01:00
nielash
5ca61ab705 bisync: equality check before renaming (leave identical files alone)
Improved detection of false positive change conflicts (identical files are now left alone instead of renamed)
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=1.%20Identical%20files%20should%20be%20left%20alone%2C%20even%20if%20new/newer/changed%20on%20both%20sides
2023-08-12 17:24:21 +01:00
nielash
4ac4ce6afd bisync: apply filters correctly during deletes
Fixed an issue causing bisync to consider more files than necessary due to overbroad filters during delete operations
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=5.%20Bisync%20reads%20files%20in%20excluded%20directories%20during%20delete%20operations
2023-08-12 17:24:21 +01:00
nielash
40a874a0d8 bisync: enforce --check-access during --resync
--check-access is now enforced during --resync, preventing data loss in certain user error scenarios
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=%2D%2Dcheck%2Daccess%20doesn%27t%20always%20fail%20when%20it%20should
2023-08-12 17:24:21 +01:00
nielash
f4dd86238d bisync: dry runs no longer commit filter changes
Fixed an issue causing dry runs to inadvertently commit filter changes
https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=1.%20Dry%20runs%20are%20not%20completely%20dry
2023-08-12 17:24:21 +01:00
nielash
4f1eafb044 gitignore: add .DS_Store and remove *.log
.DS_Store is an irrelevant file created automatically by macOS.

*.log can't be ignored because bisync uses .log files in its test suite.
2023-08-12 17:24:21 +01:00
alexia
20c9e0cab6 fichier: fix error code parsing
This fixes the following error I encountered:

```
2023/08/09 16:18:49 DEBUG : failed parsing fichier error: strconv.Atoi: parsing "#374": invalid syntax
2023/08/09 16:18:49 DEBUG : pacer: low level retry 1/10 (error HTTP error 403 (403 Forbidden) returned body: "{\"status\":\"KO\",\"message\":\"Flood detected: IP Locked #374\"}")
```
2023-08-11 00:47:01 +09:00
Nick Craig-Wood
9c09cf9cf6 build: update to released go1.21 2023-08-09 22:41:19 +01:00
Nick Craig-Wood
3a3af00180 Add antoinetran to contributors 2023-08-09 22:41:19 +01:00
Nick Craig-Wood
281e0c2d62 Add James Braza to contributors 2023-08-09 22:41:19 +01:00
Nick Craig-Wood
25b81b8789 Add Masamune3210 to contributors 2023-08-09 22:41:19 +01:00
Nick Craig-Wood
90fdd97a7b Add Nihaal Sangha to contributors 2023-08-09 22:41:19 +01:00
Chun-Hung Tseng
3c58e0efe0 protondrive: update the information regarding the advance setting enable_caching (#7202) 2023-08-09 16:01:19 +02:00
antoinetran
db744f64f6 docs: clarify --checksum documentation - Fixes #7145 2023-08-09 20:00:46 +09:00
Nick Craig-Wood
480220a84a docs: add some more docs on making your own backend 2023-08-09 20:00:46 +09:00
James Braza
d0362171cf docs: environment variable remote name only supports letters, digits, or underscores 2023-08-09 11:42:04 +01:00
Masamune3210
45887d11f6 docs: local: fix typo 2023-08-09 11:32:45 +01:00
Eng Zer Jun
c4bad5c1bc lib/rest: remove unnecessary nil check
From the Go docs:

  "A `nil` map is equivalent to an empty map. [1]

Therefore, an additional nil check for `opts.ExtraHeaders` before the loop is
unnecessary because `opts.ExtraHeaders` is a `map`.

[1]: https://go.dev/ref/spec#Map_types

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2023-08-09 19:17:42 +09:00
Nihaal Sangha
40de89df73 drive: fix typo in docs 2023-08-05 12:51:51 +01:00
Manoj Ghosh
27f5297e8d oracleobjectstorage: Use rclone's rate limiter in multipart transfers 2023-08-05 12:09:17 +09:00
Nick Craig-Wood
de185de215 accounting: show server side stats in own lines and not as bytes transferred
Before this change we showed both server side moves and server side
copies as bytes transferred.

This made a nice easy to use stats display, but also caused confusion
for users who saw unrealistic transfer times. It also caused a problem
with --max-transfer and chunker which renames each chunk after
uploading which was counted as a transfer byte.

This patch instead accounts the server side move and copy statistics
as separate lines in the stats display which will only appear if
there are any server side moves / copies. This is also output in the
rc.

This gives users something to look at when transfers are running which
was the point of the original change but it now means that transfer
bytes represents data transfers through this rclone instance only.

Fixes #7183
2023-08-05 03:54:01 +01:00
Nick Craig-Wood
d362db2e08 rclone test info: add --check-base32768 flag to check can store all base32768 characters
Fixes #7208
2023-08-05 02:59:58 +01:00
Nick Craig-Wood
db2a49e384 Add Raymond Berger to contributors 2023-08-05 02:59:58 +01:00
Kaloyan Raev
d63fcc6e44 storj: performance improvement for large file uploads
storj.io/uplink v1.11.0 comes with improved logic for uploading large
files where file segments are uploaded concurrently instead of serially.
This allows the network connection to be fully utilized during the entire
upload process.

This change enables the new upload logic.
2023-08-04 17:40:03 +09:00
kapitainsky
4444037f5c docs: box client_id creation
See: https://forum.rclone.org/t/box-getting-403-errors-when-using-chunker-working-fine-without-it/40388/22
2023-08-03 14:51:49 +01:00
Raymond Berger
a555513c26 docs: add missing comma to overview webdav footnote 2023-08-03 14:50:04 +01:00
Nick Craig-Wood
039c260216 build: update to go1.21rc4 2023-08-03 13:53:43 +01:00
Nick Craig-Wood
4577c08e05 Add Julian Lepinski to contributors 2023-08-03 13:53:43 +01:00
Nick Craig-Wood
c9ed691919 docs: add minio as a sponsor 2023-08-03 08:39:15 +01:00
Julian Lepinski
9f96c0d4ea swift: fix HEADing 0-length objects when --swift-no-large-objects set
The Swift backend does not always respect the flag telling it to skip
HEADing zero-length objects. This commit fixes that for ls/lsl/lsf.

Swift returns zero length for dynamic large object files when they're
included in a files lookup, which means that determining their size
requires HEADing each file that returns a size of zero. rclone's
--swift-no-large-objects instructs rclone that no large objects are
present and accordingly rclone should not HEAD files that return zero
length.

When rclone is performing an ls / lsf / lsl type lookup, however, it
continues to HEAD any zero length objects it encounters, even with
this flag set. Accordingly, this change causes rclone to respect the
flag in these situations.

NB: It is worth noting that this will cause rclone to incorrectly
report zero length for any dynamic large objects encountered with the
--swift-no-large-objects flag set.
2023-08-03 08:38:39 +01:00
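Example of the listing case this covers (container name invented), bearing in mind the caveat above about dynamic large objects:

```
# With the flag set, ls/lsl/lsf no longer HEAD zero-length objects, but any dynamic
# large objects encountered will be reported with a size of zero.
rclone lsl swift:container --swift-no-large-objects
```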
Nick Craig-Wood
91d095f468 docs: update command docs to new style 2023-08-02 12:53:09 +01:00
Nick Craig-Wood
bff702a6f1 docs: group the global flags and make them appear on command and flags pages
This adds an additional parameter to the creation of each flag. This
specifies one or more flag groups. This **must** be set for global
flags and **must not** be set for local flags.

This causes flags.md to be built with sections to aid comprehension
and it causes the documentation pages for each command (and the
`--help`) to be built showing the flags groups as specified in the
`groups` annotation on the command.

See: https://forum.rclone.org/t/make-docs-for-mortals-not-only-rclone-gurus/39476/
2023-08-02 12:53:09 +01:00
Nick Craig-Wood
a1d6bbd31f Add rclone completion powershell - basic implementation only 2023-08-02 12:53:09 +01:00
Nick Craig-Wood
fb6a9dfbf3 docs: fix rclone config edit docs 2023-08-02 12:53:09 +01:00
Nick Craig-Wood
3f3c5f3ff4 build: remove unused package cmd/serve/http/data
This was superseded by lib/http/template.go
2023-08-02 12:53:09 +01:00
Nick Craig-Wood
89196cb353 Add nielash to contributors 2023-08-02 12:53:09 +01:00
Nick Craig-Wood
9284506b86 Add Zach to contributors 2023-08-02 12:53:09 +01:00
yuudi
88c72d1f4d http: fix webdav OPTIONS response (#6433) 2023-08-01 11:48:41 +09:00
Paul
5e3bf50b2e webdav: nextcloud: fix segment violation in low-level retry
Fix https://github.com/rclone/rclone/issues/7168

Co-authored-by: ncw <nick@craig-wood.com>
Co-authored-by: Paul <devnoname120@gmail.com>
2023-08-01 11:15:33 +09:00
nielash
982f76b4df sftp: support dynamic --sftp-path-override
Before this change, rclone always expected --sftp-path-override to be
the absolute SSH path to remote:path/subpath which effectively made it
unusable for wrapped remotes (for example, when used with a crypt
remote, the user would need to provide the full decrypted path.)

After this change, the old behavior remains the default, but dynamic
paths are now also supported, if the user adds '@' as the first
character of --sftp-path-override. Rclone will ignore the '@' and
treat the rest of the string as the path to the SFTP remote's root.
Rclone will then add any relative subpaths automatically (including
unwrapping/decrypting remotes as necessary).

In other words, the path_override config parameter can now be used to
specify the difference between the SSH and SFTP paths. Once specified
in the config, it is no longer necessary to re-specify for each
command.

See: https://forum.rclone.org/t/sftp-path-override-breaks-on-wrapped-remotes/40025
2023-07-30 03:12:07 +01:00
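A sketch of the new form, assuming a crypt remote wrapping an sftp remote (names and the override path are placeholders):

```
# The leading '@' tells rclone the value is the path to the SFTP remote's root;
# rclone appends (and decrypts) the relative subpath itself.
rclone lsl mycrypt:photos/2023 --sftp-path-override @/volume1/data
```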
Zach
347812d1d3 ftp,sftp: add socks_proxy support for SOCKS5 proxies
Fixes #3558
2023-07-30 03:02:08 +01:00
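A hedged sketch of setting the new option on an existing sftp remote (remote name and proxy address are placeholders; only the socks_proxy option name comes from the commit):

```
# Route the sftp connection through a local SOCKS5 proxy.
rclone config update mysftp socks_proxy localhost:1080
rclone lsd mysftp:
```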
yuudi
f4449440f8 http: CORS should not be send if not set (#6433) 2023-07-29 16:12:31 +09:00
kapitainsky
e66675d346 docs: rclone backend restore 2023-07-29 11:31:16 +09:00
Nick Craig-Wood
45228e2f18 build: update dependencies
This does not update bazil/fuse because it does not build on freebsd

https://github.com/bazil/fuse/issues/295

This partially updates the prometheus library as the latest no longer compiles with plan9

https://github.com/prometheus/procfs/issues/554
2023-07-29 01:57:23 +01:00
Nick Craig-Wood
b866850fdd Add yuudi to contributors 2023-07-29 01:57:23 +01:00
yuudi
5b63b9534f rc: add execute-id for job-id 2023-07-28 18:35:14 +09:00
Nick Craig-Wood
10449c86a4 sftp: add --sftp-ssh to specify an external ssh binary to use
This allows using an external ssh binary instead of the built in ssh
library for making SFTP connections.

This makes another integration test target TestSFTPRcloneSSH:

Fixes #7012
2023-07-28 10:29:02 +01:00
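An illustrative invocation (host, user and key path are placeholders):

```
# Use the system ssh binary instead of rclone's built-in ssh library for the connection.
rclone lsd mysftp: --sftp-ssh "ssh -i ~/.ssh/id_ed25519 user@example.com"
```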
Nick Craig-Wood
26a9a9fed2 Add Niklas Hambüchen to contributors 2023-07-28 10:29:02 +01:00
Chun-Hung Tseng
602e42d334 protondrive: fix a bug in parsing User metadata (#7174) 2023-07-28 11:03:23 +02:00
Niklas Hambüchen
4c5a21703e docs: dropbox: Explain that Teams needs "Full Dropbox" 2023-07-28 17:52:29 +09:00
Nick Craig-Wood
f2ee949eff fichier: implement DirMove
See: https://forum.rclone.org/t/1fichier-rclone-does-not-allow-to-rename-files-and-folders-when-you-mount-a-1fichier-disk-drive/24726/
2023-07-28 01:25:42 +01:00
kapitainsky
3ad255172c docs: b2 versions names caveat 2023-07-28 09:23:34 +09:00
Nick Craig-Wood
29b1751d0e serve webdav: fix error: Expecting fs.Object or fs.Directory, got <nil>
Before this change rclone serve webdav would sometimes give this error

    Expecting fs.Object or fs.Directory, got <nil>

It turns out that when a file is being updated it doesn't have a
DirEntry and it is allowed to be <nil> so in this case we create the
mime type from the extension.

See: https://forum.rclone.org/t/webdav-union-of-onedrive-expecting-fs-object-or-fs-directory-got-nil/40298
2023-07-28 00:54:45 +01:00
kapitainsky
363da9aa82 docs: s3 versions names caveat 2023-07-27 12:36:50 +09:00
yuudi
6c8148ef39 http servers: allow CORS to be set with --allow-origin flag - fixes #5078
Some changes to the test cases:
Because MiddlewareCORS returns early on OPTIONS requests,
this middleware should only be used once, in the NewServer function.
Test cases should pass the AllowOrigin config instead of adding
this middleware again.

A new test case was added to test a CORS preflight request with
an authenticator. Preflight requests should always return 200 OK
regardless of authentication.

Co-authored-by: yuudi <yuudi@users.noreply.github.com>
2023-07-26 10:15:54 +01:00
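Sketch of the flag in use on one of the http-based servers (origin and port are examples):

```
# Send the Access-Control-Allow-Origin header for browser clients hosted on example.com.
rclone serve webdav remote: --addr :8080 --allow-origin "https://example.com"
```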
Nick Craig-Wood
3ed4a2e963 sftp: stop uploads re-using the same ssh connection to improve performance
Before this change we released the ssh connection back to the pool
before the upload was finished.

This meant that uploads were re-using the same ssh connection, which
reduced throughput.

This releases the ssh connection back to the pool only after the
upload has finished, or on error state.

See: https://forum.rclone.org/t/sftp-backend-opens-less-connection-than-expected/40245
2023-07-25 13:05:37 +01:00
Anagh Kumar Baranwal
aaadb48d48 vfs: keep virtual directory status accurate and reduce deadlock potential
This changes hasVirtual to an atomic struct variable that's updated on
add or delete from the virtual map.

This keeps it up to date and avoids deadlocks.

Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-07-25 09:08:16 +01:00
Anagh Kumar Baranwal
52e25c43b9 vfs: Added cache cleaner for directories to reduce memory usage
This empties the directory cache after twice the directory cache
period to release memory.

Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-07-25 09:08:16 +01:00
Nick Craig-Wood
9a66563fc6 Add Edwin Mackenzie-Owen to contributors 2023-07-25 09:08:16 +01:00
Nick Craig-Wood
6ca670d66a Add Tiago Boeing to contributors 2023-07-25 09:08:16 +01:00
Nick Craig-Wood
809653055d Add gabriel-suela to contributors 2023-07-25 09:08:16 +01:00
Nick Craig-Wood
61325ce507 Add Ricardo D'O. Albanus to contributors 2023-07-25 09:08:16 +01:00
Edwin Mackenzie-Owen
c3989d1906 smb: implement multi-threaded writes for copies to smb
smb2.File implements the WriterAtCloser interface defined in
fs/types.go. Expose it via an OpenWriterAt method on
the fs struct to support multi-threaded writes.
2023-07-25 08:31:36 +01:00
Tiago Boeing
a79887171c docs: mega: update with solution when receiving killed on process 2023-07-25 04:21:37 +01:00
Chun-Hung Tseng
f29e284c90 protondrive: fix download signature verification bug (#7169) 2023-07-24 14:54:39 +02:00
Chun-Hung Tseng
9a66086fa0 protondrive: fix bug in digests parsing (#7164) 2023-07-24 09:00:18 +02:00
Chun-Hung Tseng
1845c261c6 protondrive: fix missing file sha1 and appstring issues (#7163) 2023-07-24 08:56:21 +02:00
Chun-Hung Tseng
70cbcef624 Add Chun-Hung Tseng to Maintainer (#7162) 2023-07-23 16:29:24 +02:00
gabriel-suela
9169b2b5ab cmd: fix log message typo 2023-07-23 08:43:03 +09:00
Ricardo D'O. Albanus
0957c8fb74 chunker: Update documentation to mention issue with small files
See: https://forum.rclone.org/t/chunker-not-deactivating-for-small-files-and-wasting-api-calls/40122
2023-07-23 00:40:50 +01:00
Anagh Kumar Baranwal
bb0cd76a5f fix: mount parsing for linux 2023-07-22 17:29:20 +05:30
Nick Craig-Wood
08240c8cf5 Add Chun-Hung Tseng to contributors 2023-07-22 10:54:21 +01:00
Chun-Hung Tseng
014acc902d protondrive: add protondrive backend - fixes #6072 2023-07-22 10:46:21 +01:00
Benjamin
33fec9c835 doc: Fix Leviia block 2023-07-18 19:58:19 +01:00
kapitainsky
3a5ffc7839 docs: mention Box as base32768 compatible
As many people are suddenly moving to Box (another "unlimited" cloud storage migration saga) there are frequent questions about which filename encoding to use with crypt.

Box is base32768 friendly.

It has been tested with:

https://pub.rclone.org/base32768.zip

and:

rclone test info --check-length boxremote:

maxFileLength = 255 // for 1 byte unicode characters
maxFileLength = 255 // for 2 byte unicode characters
maxFileLength = 255 // for 3 byte unicode characters
maxFileLength = -1 // for 4 byte unicode characters
2023-07-18 19:55:54 +01:00
Benjamin
8a6bf35481 Add Leviia Object Storage on index.md 2023-07-18 09:52:05 +01:00
Benjamin
f7d27f4bf2 Add Object storage to Leviia on README.md 2023-07-18 09:52:05 +01:00
kapitainsky
378a2d21ee --max-duration - add new exit code (10)
It adds a dedicated exit code (10) for the --max-duration flag.

Rclone will exit with exit code 10 if the duration limit is reached.

It behaves in a similar fashion to --max-transfer and exit code 8.

discussed on the forum:

https://forum.rclone.org/t/max-duration-option-is-triggering-exit-with-error/39917/6
2023-07-18 09:51:31 +01:00
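A quick shell sketch of the new behaviour (paths and duration are placeholders):

```
# Exit code 10 now indicates the run was stopped by --max-duration,
# analogous to exit code 8 for --max-transfer.
rclone sync source:dir dest:dir --max-duration 1h
echo $?   # prints 10 if the duration limit cut the transfer short
```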
Nick Craig-Wood
3404eb0444 Changelog updates from Version v1.63.1 2023-07-17 15:15:16 +01:00
Nick Craig-Wood
13e5701f2a build: add new sponsors page to docs 2023-07-17 14:28:40 +01:00
Nick Craig-Wood
432d5d1e20 operations: fix overlapping check on case insensitive file systems
Before this change, the overlapping check could erroneously give this
error on case insensitive file systems:

    Failed to sync: destination and parameter to --backup-dir mustn't overlap

The code was fixed and re-worked to be simpler and more reliable.

See: https://forum.rclone.org/t/backup-dir-cannot-be-in-root-even-when-excluded/39844/
2023-07-17 14:00:04 +01:00
Nick Craig-Wood
cc05159518 Add Benjamin to contributors 2023-07-17 14:00:04 +01:00
Benjamin
119ccb2b95 s3: add Leviia S3 Object Storage as provider 2023-07-16 18:08:47 +01:00
Anagh Kumar Baranwal
0ef0e908ca build: update to go1.21rc3 and make go1.19 the minimum required version
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-07-16 10:09:25 +01:00
Nick Craig-Wood
0063d14dbb Add darix to contributors 2023-07-14 10:27:20 +01:00
albertony
0d34efb10f box: fix reconnect failing with HTTP 400 Bad Request
The error is:

  Error: failed to configure token with jwt authentication: jwtutil: failed making auth request: 400 Bad Request

With the following additional debug information:

  jwtutil: Response Body: {"error":"invalid_grant","error_description":"Please check the 'aud' claim. Should be a string"}

The problem is that in jwt-go the RegisteredClaims type has an Audience field (aud claim) that
is a list, while box apparently expects it to be a singular string. jwt-go v4, which we
currently use, has an alternative type StandardClaims which matches what box wants.
Unfortunately StandardClaims is marked as deprecated, and is removed in the
newer v5 version, so this is a short term fix only.

Fixes #7114
2023-07-14 10:24:33 +01:00
darix
415f4b2b93 webdav: nextcloud chunking: add more guidance for the user to check the config 2023-07-10 14:37:09 +01:00
Nick Craig-Wood
07cf5f1d25 operations: fix .rclonelink files not being converted back to symlinks
Before this change the new partial downloads code was causing symlinks
to be copied as regular files.

This was because the partial isn't named .rclonelink so the local
backend saves it as a normal file and renaming it to .rclonelink
doesn't cause it to become a symlink.

This fixes the problem by not copying .rclonelink files using the
partials mechanism but reverting to the previous --inplace behaviour.

This could potentially be fixed better in the future by changing the
local backend Move to change files to and from symlinks depending on
their name. However this was deemed too complicated for a point
release.

This also adds a test in the local backend. This test should ideally
be in operations but it isn't easy to put it there as operations knows
nothing of symlinks.

Fixes #7101
See: https://forum.rclone.org/t/reggression-in-v1-63-0-links-drops-the-rclonelink-extension/39483
2023-07-10 14:30:59 +01:00
Nick Craig-Wood
7d31956169 local: fix partial directory read for corrupted filesystem
Before this change if a directory entry could be listed but not
lstat-ed then rclone would give an error and abort the directory
listing with the error

    failed to read directory entry: failed to read directory "XXX": lstat XXX

This change makes sure that the directory listing carries on even
after this kind of error.

The sync will be failed but it will carry on.

This problem was caused by a programming error setting the err
variable in an outer scope when it should have been using a local err
variable.

See: https://forum.rclone.org/t/sync-aborts-if-even-one-single-unreadable-folder-is-encountered/39653
2023-07-09 17:58:03 +01:00
Nick Craig-Wood
473d443874 smb: fix "Statfs failed: bucket or container name is needed" when mounting
Before this change, if you mounted the root of the smb remote then it would
give an error for `rclone about` and periodically in the mount logs:

    Statfs failed: bucket or container name is needed in remote

This fix makes the smb backend return empty usage in this case which
will stop the errors and show the default 1P of free space.

See: https://forum.rclone.org/t/error-statfs-failed-bucket-or-container-name-is-needed-in-remote/39631
2023-07-08 12:24:46 +01:00
Nick Craig-Wood
e294b76121 Add Vladislav Vorobev to contributors 2023-07-08 12:24:46 +01:00
Vladislav Vorobev
8f3c583870 docs: no need to disable 2FA for Mail.ru Cloud anymore
This sentence was written at a time when the backend used an access token; nowadays, users need to generate and use an application password instead, see #6398.
2023-07-08 10:27:40 +02:00
Nick Craig-Wood
d0d41fe847 rclone config redacted: implement support mechanism for showing redacted config
This introduces a new fs.Option flag, Sensitive and uses this along
with IsPassword to redact the info in the config file for support
purposes.

It adds this flag into backends where appropriate. It was necessary to
add oauthutil.SharedOptions to some backends as they were missing
them.

Fixes #5209
2023-07-07 16:25:14 +01:00
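Roughly the support workflow this enables (remote name is an example; the single-remote form is an assumption, not confirmed by the commit):

```
# Print the config with values marked Sensitive or IsPassword redacted,
# suitable for pasting into a forum post or issue.
rclone config redacted
# Assumed variant limited to a single remote:
rclone config redacted mydrive
```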
Nick Craig-Wood
297f15a3e3 docs: update the number of providers supported 2023-07-07 16:25:14 +01:00
Nick Craig-Wood
d5f0affd4b Add Mahad to contributors 2023-07-07 16:25:14 +01:00
Nick Craig-Wood
0598aafbfd Add BakaWang to contributors 2023-07-07 16:25:14 +01:00
Mahad
528e22f139 docs: drive: Fix step 4 in "Making your own client_id" 2023-07-06 21:24:17 +01:00
BakaWang
f1a8420814 s3: add synology to s3 provider list 2023-07-06 10:54:07 +01:00
Nick Craig-Wood
e250f1afcd docs: remove old donate page 2023-07-06 10:13:42 +01:00
Nick Craig-Wood
ebf24c9872 docs: update contact page on website 2023-07-05 16:57:07 +01:00
Paul
b4c7b240d8 webdav: nextcloud: fix must use /dav/files/USER endpoint not /webdav error
Fix https://github.com/rclone/rclone/issues/7103

Before this change the RegExp validating the endpoint URL was a bit
too strict allowing only /dav/files/USER due to chunking limitations.

This patch adds back support for /dav/files/USER/dir/subdir etc.

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2023-07-05 16:56:01 +01:00
625 changed files with 106097 additions and 63711 deletions

View File

@@ -27,12 +27,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
include:
- job_name: linux
os: ubuntu-latest
go: '1.20'
go: '1.21'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -43,14 +43,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '1.20'
go: '1.21'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-11
go: '1.20'
go: '1.21'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -59,14 +59,14 @@ jobs:
- job_name: mac_arm64
os: macos-11
go: '1.20'
go: '1.21'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '1.20'
go: '1.21'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -76,30 +76,30 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.20'
go: '1.21'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.18
os: ubuntu-latest
go: '1.18'
quicktest: true
racequicktest: true
- job_name: go1.19
os: ubuntu-latest
go: '1.19'
quicktest: true
racequicktest: true
- job_name: go1.20
os: ubuntu-latest
go: '1.20'
quicktest: true
racequicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -130,6 +130,11 @@ jobs:
- name: Install Libraries on macOS
shell: bash
run: |
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
unset HOMEBREW_NO_INSTALL_FROM_API
brew untap --force homebrew/core
brew untap --force homebrew/cask
brew update
brew install --cask macfuse
if: matrix.os == 'macos-11'
@@ -211,7 +216,6 @@ jobs:
shell: bash
run: |
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
make ci_beta
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
@@ -227,7 +231,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Code quality test
uses: golangci/golangci-lint-action@v3
@@ -239,7 +243,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '1.20'
go-version: '1.21'
check-latest: true
- name: Install govulncheck
@@ -256,7 +260,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -264,7 +268,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '1.20'
go-version: '1.21'
- name: Go module cache
uses: actions/cache@v3

View File

@@ -10,26 +10,35 @@ jobs:
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v4
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
@@ -42,9 +51,12 @@ jobs:
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
# for more detailed information.
password: ${{ secrets.GITHUB_TOKEN }}
- name: Show disk usage
shell: bash
run: |
df -h .
- name: Build and publish image
uses: docker/build-push-action@v4
uses: docker/build-push-action@v5
with:
file: Dockerfile
context: .
@@ -54,8 +66,12 @@ jobs:
rclone/rclone:beta
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
cache-from: type=gha
cache-to: type=gha,mode=max
cache-from: type=gha, scope=${{ github.workflow }}
cache-to: type=gha, mode=max, scope=${{ github.workflow }}
provenance: false
# Eventually cache will need to be cleared if builds more frequent than once a week
# https://github.com/docker/build-push-action/issues/252
- name: Show disk usage
shell: bash
run: |
df -h .

View File

@@ -10,8 +10,17 @@ jobs:
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Get actual patch version
@@ -39,8 +48,17 @@ jobs:
runs-on: ubuntu-latest
name: Build docker plugin job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and publish docker plugin

View File

@@ -5,7 +5,7 @@ on:
jobs:
publish:
runs-on: windows-latest # Action can only run on Windows
runs-on: ubuntu-latest
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:

.gitignore vendored
View File

@@ -8,10 +8,13 @@ rclone.iml
.idea
.history
*.test
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
Thumbs.db
__pycache__
.DS_Store
/docs/static/img/logos/
resource_windows_*.syso
.devcontainer

View File

@@ -33,23 +33,67 @@ issues:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
# don't disable the revive messages about comments on exported functions
include:
- EXC0012
- EXC0013
- EXC0014
- EXC0015
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m
linters-settings:
revive:
# setting rules seems to disable all the rules, so re-enable them here
rules:
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
- name: empty-block
disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
- name: increment-decrement
disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks

View File

@@ -1,8 +1,8 @@
# Contributing to rclone #
# Contributing to rclone
This is a short guide on how to contribute things to rclone.
## Reporting a bug ##
## Reporting a bug
If you've just got a question or aren't sure if you've found a bug
then please use the [rclone forum](https://forum.rclone.org/) instead
@@ -12,13 +12,13 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):
* Rclone version (e.g. output from `rclone version`)
* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
* if the log contains secrets then edit the file with a text editor first to obscure them
- Rclone version (e.g. output from `rclone version`)
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
- if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a new feature or bug fix ##
## Submitting a new feature or bug fix
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.
@@ -73,9 +73,9 @@ This is typically enough if you made a simple bug fix, otherwise please read the
Make sure you
* Add [unit tests](#testing) for a new feature.
* Add [documentation](#writing-documentation) for a new feature.
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
- Add [unit tests](#testing) for a new feature.
- Add [documentation](#writing-documentation) for a new feature.
- [Commit your changes](#committing-your-changes) using the [commit message guidelines](#commit-messages).
When you are done with that push your changes to GitHub:
@@ -88,9 +88,9 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
## Using Git and GitHub ##
## Using Git and GitHub
### Committing your changes ###
### Committing your changes
Follow the guideline for [commit messages](#commit-messages) and then:
@@ -107,7 +107,7 @@ You can modify the message or changes in the latest commit using:
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Replacing your previously pushed commits ###
### Replacing your previously pushed commits
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
@@ -115,7 +115,7 @@ Your previously pushed commits are replaced by:
git push --force origin my-new-feature
### Basing your changes on the latest master ###
### Basing your changes on the latest master
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
@@ -149,13 +149,21 @@ If you squash commits that have been pushed to GitHub, then you will have to [re
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
### GitHub Continuous Integration ###
### GitHub Continuous Integration
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
## Testing ##
## Testing
### Quick testing ###
### Code quality tests
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.
You can run them with `make check` or with `golangci-lint run ./...`.
Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).
### Quick testing
rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
@@ -168,7 +176,7 @@ You can also use `make`, if supported by your platform
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
### Backend testing ###
### Backend testing
rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
@@ -203,7 +211,7 @@ project root:
go install github.com/rclone/rclone/fstest/test_all
test_all -backend drive
### Full integration testing ###
### Full integration testing
If you want to run all the integration tests against all the remotes,
then change into the project root and run
@@ -218,55 +226,56 @@ The commands may require some extra go packages which you can install with
The full integration tests are run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/
## Code Organisation ##
## Code Organisation
Rclone code is organised into a small number of top level directories
with modules beneath.
* backend - the rclone backends for interfacing to cloud providers -
* all - import this to load all the cloud providers
* ...providers
* bin - scripts for use while building or maintaining rclone
* cmd - the rclone commands
* all - import this to load all the commands
* ...commands
* cmdtest - end-to-end tests of commands, flags, environment variables,...
* docs - the documentation and website
* content - adjust these docs only - everything else is autogenerated
* command - these are auto-generated - edit the corresponding .go file
* fs - main rclone definitions - minimal amount of code
* accounting - bandwidth limiting and statistics
* asyncreader - an io.Reader which reads ahead
* config - manage the config file and flags
* driveletter - detect if a name is a drive letter
* filter - implements include/exclude filtering
* fserrors - rclone specific error handling
* fshttp - http handling for rclone
* fspath - path handling for rclone
* hash - defines rclone's hash types and functions
* list - list a remote
* log - logging facilities
* march - iterates directories in lock step
* object - in memory Fs objects
* operations - primitives for sync, e.g. Copy, Move
* sync - sync directories
* walk - walk a directory
* fstest - provides integration test framework
* fstests - integration tests for the backends
* mockdir - mocks an fs.Directory
* mockobject - mocks an fs.Object
* test_all - Runs integration tests for everything
* graphics - the images used in the website, etc.
* lib - libraries used by the backend
* atexit - register functions to run when rclone exits
* dircache - directory ID to name caching
* oauthutil - helpers for using oauth
* pacer - retries with backoff and paces operations
* readers - a selection of useful io.Readers
* rest - a thin abstraction over net/http for REST
* vfs - Virtual FileSystem layer for implementing rclone mount and similar
- backend - the rclone backends for interfacing to cloud providers -
- all - import this to load all the cloud providers
- ...providers
- bin - scripts for use while building or maintaining rclone
- cmd - the rclone commands
- all - import this to load all the commands
- ...commands
- cmdtest - end-to-end tests of commands, flags, environment variables,...
- docs - the documentation and website
- content - adjust these docs only - everything else is autogenerated
- command - these are auto-generated - edit the corresponding .go file
- fs - main rclone definitions - minimal amount of code
- accounting - bandwidth limiting and statistics
- asyncreader - an io.Reader which reads ahead
- config - manage the config file and flags
- driveletter - detect if a name is a drive letter
- filter - implements include/exclude filtering
- fserrors - rclone specific error handling
- fshttp - http handling for rclone
- fspath - path handling for rclone
- hash - defines rclone's hash types and functions
- list - list a remote
- log - logging facilities
- march - iterates directories in lock step
- object - in memory Fs objects
- operations - primitives for sync, e.g. Copy, Move
- sync - sync directories
- walk - walk a directory
- fstest - provides integration test framework
- fstests - integration tests for the backends
- mockdir - mocks an fs.Directory
- mockobject - mocks an fs.Object
- test_all - Runs integration tests for everything
- graphics - the images used in the website, etc.
- lib - libraries used by the backend
- atexit - register functions to run when rclone exits
- dircache - directory ID to name caching
- oauthutil - helpers for using oauth
- pacer - retries with backoff and paces operations
- readers - a selection of useful io.Readers
- rest - a thin abstraction over net/http for REST
- librclone - in memory interface to rclone's API for embedding rclone
- vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation ##
## Writing Documentation
If you are adding a new feature then please update the documentation.
@@ -277,22 +286,22 @@ alphabetical order.
If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field.
* Start with the most important information about the option,
- Start with the most important information about the option,
as a single sentence on a single line.
* This text will be used for the command-line flag help.
* It will be combined with other information, such as any default value,
- This text will be used for the command-line flag help.
- It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence.
* It should end with a period/full stop character, which will be shown
- It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help.
* Try to keep it below 80 characters, to reduce text wrapping in the terminal.
* More details can be added in a new paragraph, after an empty line (`"\n\n"`).
* Like with docs generated from Markdown, a single line break is ignored
- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
- Like with docs generated from Markdown, a single line break is ignored
and two line breaks creates a new paragraph.
* This text will be shown to the user in `rclone config`
- This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release).
* To create options of enumeration type use the `Examples:` field.
* Each example value have their own `Help:` field, but they are treated
- To create options of enumeration type use the `Examples:` field.
- Each example value has its own `Help:` field, but these are treated
a bit differently than the main option help text. They will be shown
as an unordered list, therefore a single line break is enough to
create a new list item. Also, for enumeration texts like name of
@@ -312,12 +321,12 @@ combined unmodified with other information (such as any default value).
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
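To make the conventions above concrete, here is a rough sketch of what a backend option written to follow them might look like (the option names, default and enumeration values are invented for illustration, not taken from a real backend):

```go
Options: []fs.Option{{
	Name: "fetch_timeout",
	// First Help line: the most important information, one sentence,
	// ending with a period - it becomes the command-line flag help.
	Help: `Timeout for fetching a single object.

More detail can follow in a new paragraph after an empty line. A
single line break, like the one in this sentence, is ignored when
the text is rendered.`,
	Default:  fs.Duration(30 * time.Second),
	Advanced: true,
}, {
	Name:    "list_mode",
	Help:    "How directory listings should be fetched.",
	Default: "auto",
	// Enumeration values each get their own short Help text.
	Examples: []fs.OptionExample{{
		Value: "auto",
		Help:  "Let rclone pick the best method.",
	}, {
		Value: "recursive",
		Help:  "Always use recursive listings.",
	}},
	Advanced: true,
}},
```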
## Making a release ##
## Making a release
There are separate instructions for making a release in the RELEASE.md
file.
## Commit messages ##
## Commit messages
Please make the first line of your commit message a summary of the
change that a user (not a developer) of rclone would like to read, and
@@ -358,7 +367,7 @@ error fixing the hang.
Fixes #1498
```
## Adding a dependency ##
## Adding a dependency
rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
@@ -370,7 +379,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.
GO111MODULE=on go get github.com/ncw/new_dependency
go get github.com/ncw/new_dependency
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
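If you do need a particular version, the usual module version suffix works here too (the version number below is only illustrative):

go get github.com/ncw/new_dependency@v1.2.3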
@@ -378,15 +387,15 @@ go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including `go.mod`
and `go.sum` in the same commit as your other changes.
## Updating a dependency ##
## Updating a dependency
If you need to update a dependency then run
GO111MODULE=on go get -u golang.org/x/crypto
go get golang.org/x/crypto
Check in a single commit as above.
## Updating all the dependencies ##
## Updating all the dependencies
In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest
@@ -395,7 +404,7 @@ stable release. Check in the changes in a single commit as above.
This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.
## Updating a backend ##
## Updating a backend
If you update a backend then please run the unit tests and the
integration tests for that backend.
@@ -410,82 +419,133 @@ integration tests.
The next section goes into more detail about the tests.
## Writing a new backend ##
## Writing a new backend
Choose a name. The docs here will use `remote` as an example.
Note that in rclone terminology a file system backend is called a
remote or an fs.
Research
### Research
* Look at the interfaces defined in `fs/fs.go`
* Study one or more of the existing remotes
- Look at the interfaces defined in `fs/types.go`
- Study one or more of the existing remotes
Getting going
### Getting going
* Create `backend/remote/remote.go` (copy this from a similar remote)
* box is a good one to start from if you have a directory-based remote
* b2 is a good one to start from if you have a bucket-based remote
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `rclone purge -v TestRemote:rclone-info`
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
* open `remote.csv` in a spreadsheet and examine
- Create `backend/remote/remote.go` (copy this from a similar remote)
- box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
- b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
- `rclone purge -v TestRemote:rclone-info`
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
- open `remote.csv` in a spreadsheet and examine
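For orientation, a freshly copied backend boils down to something like the following sketch - the package name, description and option are placeholders, and a real backend implements many more methods than shown here:

```go
// Package remote provides an interface to the Remote storage system.
package remote

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote",
		Description: "Example Remote Storage",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:      "access_token",
			Help:      "Access token for the service.",
			Sensitive: true,
		}},
	})
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse the config into an Options struct, create the Fs,
	// read any metadata needed and return it.
	return nil, errors.New("not implemented yet")
}
```

Remember to add the matching `_ "github.com/rclone/rclone/backend/remote"` line to `backend/all/all.go` so the backend gets compiled in.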
Unit tests
### Guidelines for a speedy merge
* Create a config entry called `TestRemote` for the unit tests to use
* Create a `backend/remote/remote_test.go` - copy and adjust your example remote
* Make sure all tests pass with `go test -v`
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!
Integration tests
### Unit tests
* Add your backend to `fstest/test_all/config.yaml`
* Once you've done that then you can use the integration test framework from the project root:
* go install ./...
* test_all -backends remote
- Create a config entry called `TestRemote` for the unit tests to use
- Create a `backend/remote/remote_test.go` - copy and adjust your example remote
- Make sure all tests pass with `go test -v`
### Integration tests
- Add your backend to `fstest/test_all/config.yaml`
- Once you've done that then you can use the integration test framework from the project root:
- go install ./...
- test_all -backends remote
Or if you want to run the integration tests manually:
* Make sure integration tests pass with
* `cd fs/operations`
* `go test -v -remote TestRemote:`
* `cd fs/sync`
* `go test -v -remote TestRemote:`
* If your remote defines `ListR` check with this also
* `go test -v -remote TestRemote: -fast-list`
- Make sure integration tests pass with
- `cd fs/operations`
- `go test -v -remote TestRemote:`
- `cd fs/sync`
- `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also
- `go test -v -remote TestRemote: -fast-list`
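While iterating it can be quicker to run only a subset of tests using the standard `-run` flag of `go test`, for example in your backend directory (the pattern is a regular expression, so it can be narrowed further):

go test -v -remote TestRemote: -run TestIntegration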
See the [testing](#testing) section for more information on integration tests.
Add your fs to the docs - you'll need to pick an icon for it from
### Backend documentation
Add your backend to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.
* `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* make sure this has the `autogenerated options` comments in (see your reference backend docs)
* update them with `make backenddocs` - revert any changes in other backends
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/_index.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
- make sure this has the `autogenerated options` comments in (see your reference backend docs)
- update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
- `bin/make_manual.py` - add the page to the `docs` constant
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
## Writing a plugin ##
## Adding a new s3 provider
It is quite easy to add a new S3 provider to rclone.
You'll need to modify the following files:
- `backend/s3/s3.go`
- Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
- Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
- Add the provider at the top of the page.
- Add a section about the provider linked from there.
- Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md`
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page on GitHub
- Add the provider and a link to the section you wrote in `docs/content/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
- Add the provider and a link to the section you wrote in `docs/content/s3.md`
When adding the provider, endpoints, quirks, docs etc keep them in
alphabetical order by `Provider` name, but with `AWS` first and
`Other` last.
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
Once you've written the code, test that `rclone config` works to your
satisfaction, and check the integration tests pass with `go test -v -remote
NewS3Provider:`. You may need to adjust the quirks to get them to
pass. Some providers just can't pass the tests with control characters
in the names, so if these fail and the provider doesn't support
`urlEncodeListings` in the quirks then ignore them. Note that the
`SetTier` test may also fail on non-AWS providers.
For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).
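For a feel of the scale of the change, the edits to `backend/s3/s3.go` are mostly small additions along these lines - a fragment rather than working code, with the provider name and quirk settings invented for illustration (check the existing providers and the comments in `setQuirks` for the real flag names):

```go
// In the "provider" option's Examples (kept in alphabetical order):
{
	Value: "NewS3Provider",
	Help:  "NewS3Provider Object Storage",
},

// In setQuirks, add a case switching off anything the provider
// does not support, for example:
case "NewS3Provider":
	urlEncodeListings = false // listings are not URL encoded
	useMultipartEtag = false  // ETags are not MD5 sums for multipart uploads
```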
## Writing a plugin
New features (backends, commands) can also be added "out-of-tree", through Go plugins.
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
Usage
### Usage
- Naming
- Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
@@ -500,7 +560,7 @@ Usage
- Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source of rclone)
Building
### Building
To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.
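Once the package clause has been changed to `main`, the plugin can be built with Go's standard plugin build mode on platforms that support it, for example (the output name follows the pattern above, assuming `backend` is the KIND for a backend plugin and `mybackend` is your backend's name):

go build -buildmode=plugin -o librcloneplugin_backend_mybackend.so

Because the backend's `init()` still calls `fs.Register`, loading the plugin is enough to make the backend visible to rclone.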

View File

@@ -18,6 +18,9 @@ Current active maintainers of rclone are:
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |
**This is a work in progress Draft**

MANUAL.html generated (diff suppressed because it is too large)

MANUAL.md generated (diff suppressed because it is too large)

MANUAL.txt generated (diff suppressed because it is too large)

View File

@@ -30,6 +30,7 @@ ifdef RELEASE_TAG
TAG := $(RELEASE_TAG)
endif
GO_VERSION := $(shell go version)
GO_OS := $(shell go env GOOS)
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
@@ -46,7 +47,13 @@ endif
.PHONY: rclone test_all vars version
rclone:
ifeq ($(GO_OS),windows)
go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
endif
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
ifeq ($(GO_OS),windows)
rm resource_windows_`go env GOARCH`.syso
endif
mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
@@ -66,6 +73,10 @@ btest:
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
@echo "Copied markdown of beta release to clip board"
btesth:
@echo "<a href="$(BETA_URL)">$(TAG)</a> on branch <a href="https://github.com/rclone/rclone/tree/$(BRANCH)">$(BRANCH)</a> (uploaded in 15-30 mins)" | xclip -r -sel clip -t text/html
@echo "Copied beta release in HTML to clip board"
version:
@echo '$(TAG)'
@@ -98,10 +109,6 @@ build_dep:
release_dep_linux:
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
# Get the release dependencies we only install on Windows
release_dep_windows:
GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
# Update dependencies
showupdates:
@echo "*** Direct dependencies that could be updated ***"
@@ -146,7 +153,7 @@ rcdocs: rclone
install: rclone
install -d ${DESTDIR}/usr/bin
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
install ${GOPATH}/bin/rclone ${DESTDIR}/usr/bin
clean:
go clean ./...

View File

@@ -51,12 +51,16 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Linkbox [:page_facing_up:](https://rclone.org/linkbox)
* Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
@@ -72,8 +76,10 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
@@ -84,6 +90,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)

View File

@@ -41,12 +41,15 @@ Early in the next release cycle update the dependencies
* Review any pinned packages in go.mod and remove if possible
* make updatedirect
* make
* make GOTAGS=cmount
* make compiletest
* git commit -a -v
* make update
* make
* make GOTAGS=cmount
* make compiletest
* roll back any updates which didn't compile
* git commit -a -v --amend
* **NB** watch out for this changing the default go version in `go.mod`
Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
@@ -90,6 +93,35 @@ Now
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push
## Sponsor logos
If updating the website note that the sponsor logos have been moved out of the main repository.
You will need to checkout `/docs/static/img/logos` from https://github.com/rclone/third-party-logos
which is a private repo containing artwork from sponsors.
## Update the website between releases
Create an update website branch based off the last release
git co -b update-website
If the branch already exists, double check there are no commits that need saving.
Now reset the branch to the last release
git reset --hard v1.64.0
Create the changes, check them in, test with `make serve` then
make upload_test_website
Check out https://test.rclone.org and when happy
make upload_website
Cherry pick any changes back to master and the stable branch if it is active.
## Making a manual build of docker
The rclone docker image should autobuild on via GitHub actions. If it doesn't

View File

@@ -1 +1 @@
v1.64.0
v1.65.0

View File

@@ -6,6 +6,7 @@ import (
_ "github.com/rclone/rclone/backend/alias"
_ "github.com/rclone/rclone/backend/amazonclouddrive"
_ "github.com/rclone/rclone/backend/azureblob"
_ "github.com/rclone/rclone/backend/azurefiles"
_ "github.com/rclone/rclone/backend/b2"
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
@@ -24,9 +25,11 @@ import (
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/imagekit"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/linkbox"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
@@ -38,8 +41,10 @@ import (
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/quatrix"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"

View File

@@ -1,11 +1,10 @@
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
@@ -18,6 +17,7 @@ import (
"net/url"
"os"
"path"
"sort"
"strconv"
"strings"
"sync"
@@ -33,7 +33,6 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -46,10 +45,8 @@ import (
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/sync/errgroup"
)
const (
@@ -70,12 +67,16 @@ const (
emulatorAccount = "devstoreaccount1"
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
var (
errCantUpdateArchiveTierBlobs = fserrors.NoRetryError(errors.New("can't update archive tier blob without --azureblob-archive-tier-delete"))
// Take this when changing or reading metadata.
//
// It acts as global metadata lock so we don't bloat Object
// with an extra lock that will only very rarely be contended.
metadataMu sync.Mutex
)
// Register with Fs
@@ -95,6 +96,7 @@ Leave blank to use SAS URL or Emulator, otherwise it needs to be set.
If this is blank and if env_auth is set it will be read from the
environment variable ` + "`AZURE_STORAGE_ACCOUNT_NAME`" + ` if possible.
`,
Sensitive: true,
}, {
Name: "env_auth",
Help: `Read credentials from runtime (environment variables, CLI or MSI).
@@ -106,11 +108,13 @@ See the [authentication docs](/azureblob#authentication) for full info.`,
Help: `Storage Account Shared Key.
Leave blank to use SAS URL or Emulator.`,
Sensitive: true,
}, {
Name: "sas_url",
Help: `SAS URL for container level access only.
Leave blank if using account/key or Emulator.`,
Sensitive: true,
}, {
Name: "tenant",
Help: `ID of the service principal's tenant. Also called its directory ID.
@@ -120,6 +124,7 @@ Set this if using
- Service principal with certificate
- User with username and password
`,
Sensitive: true,
}, {
Name: "client_id",
Help: `The ID of the client in use.
@@ -129,6 +134,7 @@ Set this if using
- Service principal with certificate
- User with username and password
`,
Sensitive: true,
}, {
Name: "client_secret",
Help: `One of the service principal's client secrets
@@ -136,6 +142,7 @@ Set this if using
Set this if using
- Service principal with client secret
`,
Sensitive: true,
}, {
Name: "client_certificate_path",
Help: `Path to a PEM or PKCS12 certificate file including the private key.
@@ -173,7 +180,8 @@ Optionally set this if using
Set this if using
- User with username and password
`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "password",
Help: `The user's password
@@ -216,17 +224,20 @@ msi_client_id, or msi_mi_res_id parameters.`,
Default: false,
Advanced: true,
}, {
Name: "msi_object_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
Advanced: true,
Name: "msi_object_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
Advanced: true,
Sensitive: true,
}, {
Name: "msi_client_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
Advanced: true,
Name: "msi_client_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
Advanced: true,
Sensitive: true,
}, {
Name: "msi_mi_res_id",
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true,
Name: "msi_mi_res_id",
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true,
Sensitive: true,
}, {
Name: "use_emulator",
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
@@ -284,10 +295,10 @@ avoid the time out.`,
Advanced: true,
}, {
Name: "access_tier",
Help: `Access tier of blob: hot, cool or archive.
Help: `Access tier of blob: hot, cool, cold or archive.
Archived blobs can be restored by setting access tier to hot or
cool. Leave blank if you intend to use default access tier, which is
Archived blobs can be restored by setting access tier to hot, cool or
cold. Leave blank if you intend to use default access tier, which is
set at account level
If there is no "access tier" specified, rclone doesn't apply any tier.
@@ -295,7 +306,7 @@ rclone performs "Set Tier" operation on blobs while uploading, if objects
are not modified, specifying "access tier" to new one will have no effect.
If blobs are in "archive tier" at remote, trying to perform data transfer
operations from remote will not be allowed. User should first restore by
tiering blob to "Hot" or "Cool".`,
tiering blob to "Hot", "Cool" or "Cold".`,
Advanced: true,
}, {
Name: "archive_tier_delete",
@@ -327,17 +338,16 @@ to start uploading.`,
Advanced: true,
}, {
Name: "memory_pool_flush_time",
Default: memoryPoolFlushTime,
Default: fs.Duration(time.Minute),
Advanced: true,
Help: `How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
Hide: fs.OptionHideBoth,
Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
}, {
Name: "memory_pool_use_mmap",
Default: memoryPoolUseMmap,
Default: false,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
Hide: fs.OptionHideBoth,
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -422,8 +432,6 @@ type Options struct {
ArchiveTierDelete bool `config:"archive_tier_delete"`
UseEmulator bool `config:"use_emulator"`
DisableCheckSum bool `config:"disable_checksum"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
PublicAccess string `config:"public_access"`
DirectoryMarkers bool `config:"directory_markers"`
@@ -447,8 +455,6 @@ type Fs struct {
cache *bucket.Cache // cache for container creation status
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
poolSize int64 // size of pages in memory pool
publicAccess container.PublicAccessType // Container Public Access Level
}
@@ -461,7 +467,7 @@ type Object struct {
size int64 // Size of the object
mimeType string // Content-Type of the object
accessTier blob.AccessTier // Blob Access Tier
meta map[string]string // blob metadata
meta map[string]string // blob metadata - take metadataMu when accessing
}
// ------------------------------------------------------------
@@ -514,6 +520,7 @@ func (o *Object) split() (container, containerPath string) {
func validateAccessTier(tier string) bool {
return strings.EqualFold(tier, string(blob.AccessTierHot)) ||
strings.EqualFold(tier, string(blob.AccessTierCool)) ||
strings.EqualFold(tier, string(blob.AccessTierCold)) ||
strings.EqualFold(tier, string(blob.AccessTierArchive))
}
@@ -643,8 +650,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.AccessTier == "" {
opt.AccessTier = string(defaultAccessTier)
} else if !validateAccessTier(opt.AccessTier) {
return nil, fmt.Errorf("supported access tiers are %s, %s and %s",
string(blob.AccessTierHot), string(blob.AccessTierCool), string(blob.AccessTierArchive))
return nil, fmt.Errorf("supported access tiers are %s, %s, %s and %s",
string(blob.AccessTierHot), string(blob.AccessTierCool), string(blob.AccessTierCold), string(blob.AccessTierArchive))
}
if !validatePublicAccess((opt.PublicAccess)) {
@@ -661,13 +668,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
cache: bucket.NewCache(),
cntSVCcache: make(map[string]*container.Client, 1),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
ci.Transfers,
opt.MemoryPoolUseMmap,
),
poolSize: int64(opt.ChunkSize),
}
f.publicAccess = container.PublicAccessType(opt.PublicAccess)
f.setRoot(root)
@@ -962,6 +962,9 @@ func (f *Fs) getBlockBlobSVC(container, containerPath string) *blockblob.Client
// updateMetadataWithModTime adds the modTime passed in to o.meta.
func (o *Object) updateMetadataWithModTime(modTime time.Time) {
metadataMu.Lock()
defer metadataMu.Unlock()
// Make sure o.meta is not nil
if o.meta == nil {
o.meta = make(map[string]string, 1)
@@ -1493,7 +1496,7 @@ func (f *Fs) deleteContainer(ctx context.Context, containerName string) error {
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
container, directory := f.split(dir)
// Remove directory marker file
if f.opt.DirectoryMarkers && container != "" && dir != "" {
if f.opt.DirectoryMarkers && container != "" && directory != "" {
o := &Object{
fs: f,
remote: dir + "/",
@@ -1527,7 +1530,10 @@ func (f *Fs) Hashes() hash.Set {
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error {
container, directory := f.split(dir)
if container == "" || directory != "" {
if container == "" {
return errors.New("can't purge from root")
}
if directory != "" {
// Delegate to caller if not root of a container
return fs.ErrorCantPurge
}
@@ -1584,19 +1590,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return f.NewObject(ctx, remote)
}
func (f *Fs) getMemoryPool(size int64) *pool.Pool {
if size == int64(f.opt.ChunkSize) {
return f.pool
}
return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(size),
f.ci.Transfers,
f.opt.MemoryPoolUseMmap,
)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -1640,6 +1633,9 @@ func (o *Object) Size() int64 {
// Set o.metadata from metadata
func (o *Object) setMetadata(metadata map[string]*string) {
metadataMu.Lock()
defer metadataMu.Unlock()
if len(metadata) > 0 {
// Lower case the metadata
o.meta = make(map[string]string, len(metadata))
@@ -1664,6 +1660,9 @@ func (o *Object) setMetadata(metadata map[string]*string) {
// Get metadata from o.meta
func (o *Object) getMetadata() (metadata map[string]*string) {
metadataMu.Lock()
defer metadataMu.Unlock()
if len(o.meta) == 0 {
return nil
}
@@ -1875,12 +1874,7 @@ func (o *Object) ModTime(ctx context.Context) (result time.Time) {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// Make sure o.meta is not nil
if o.meta == nil {
o.meta = make(map[string]string, 1)
}
// Set modTimeKey in it
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
o.updateMetadataWithModTime(modTime)
blb := o.getBlobSVC()
opt := blob.SetMetadataOptions{}
@@ -1906,7 +1900,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var offset int64
var count int64
if o.AccessTier() == blob.AccessTierArchive {
return nil, fmt.Errorf("blob in archive tier, you need to set tier to hot or cool first")
return nil, fmt.Errorf("blob in archive tier, you need to set tier to hot, cool, cold first")
}
fs.FixRangeOption(options, o.size)
for _, option := range options {
@@ -1972,8 +1966,8 @@ func (rs *readSeekCloser) Close() error {
return nil
}
// increment the slice passed in as LSB binary
func increment(xs []byte) {
// increment the array as LSB binary
func increment(xs *[8]byte) {
for i, digit := range xs {
newDigit := digit + 1
xs[i] = newDigit
@@ -1984,22 +1978,43 @@ func increment(xs []byte) {
}
}
var warnStreamUpload sync.Once
// record chunk number and id for Close
type azBlock struct {
chunkNumber int
id string
}
// uploadMultipart uploads a file using multipart upload
// Implements the fs.ChunkWriter interface
type azChunkWriter struct {
chunkSize int64
size int64
f *Fs
ui uploadInfo
blocksMu sync.Mutex // protects the below
blocks []azBlock // list of blocks for finalize
binaryBlockID [8]byte // block counter as LSB first 8 bytes
o *Object
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blb *blockblob.Client, httpHeaders *blob.HTTPHeaders) (err error) {
// Calculate correct partSize
partSize := o.fs.opt.ChunkSize
totalParts := -1
// make concurrency machinery
concurrency := o.fs.opt.UploadConcurrency
if concurrency < 1 {
concurrency = 1
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
tokens := pacer.NewTokenDispenser(concurrency)
ui, err := o.prepareUpload(ctx, src, options)
if err != nil {
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
}
// Calculate correct partSize
partSize := f.opt.ChunkSize
totalParts := -1
size := src.Size()
// Note that the max size of file is 4.75 TB (100 MB X 50,000
// blocks) and this is bigger than the max uncommitted block
@@ -2013,13 +2028,13 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
// 195GB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
o.fs.opt.ChunkSize, partSize*fs.SizeSuffix(blockblob.MaxBlocks))
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, partSize*fs.SizeSuffix(blockblob.MaxBlocks))
})
} else {
partSize = chunksize.Calculator(o, size, blockblob.MaxBlocks, o.fs.opt.ChunkSize)
partSize = chunksize.Calculator(remote, size, blockblob.MaxBlocks, f.opt.ChunkSize)
if partSize > fs.SizeSuffix(blockblob.MaxStageBlockBytes) {
return fmt.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), fs.SizeSuffix(blockblob.MaxBlocks), fs.SizeSuffix(blockblob.MaxStageBlockBytes))
return info, nil, fmt.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), fs.SizeSuffix(blockblob.MaxBlocks), fs.SizeSuffix(blockblob.MaxStageBlockBytes))
}
totalParts = int(fs.SizeSuffix(size) / partSize)
if fs.SizeSuffix(size)%partSize != 0 {
@@ -2029,173 +2044,264 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, partSize)
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)
// FIXME it would be nice to delete uncommitted blocks
// See: https://github.com/rclone/rclone/issues/5583
//
// However there doesn't seem to be an easy way of doing this other than
// by deleting the target.
//
// This means that a failed upload deletes the target which isn't ideal.
//
// Uploading a zero length blob and deleting it will remove the
// uncommitted blocks I think.
//
// Could check to see if a file exists already and if it
// doesn't then create a 0 length file and delete it to flush
// the uncommitted blocks.
//
// This is what azcopy does
// https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
// defer atexit.OnError(&err, func() {
// fs.Debugf(o, "Cancelling multipart upload")
// // Code goes here!
// })()
// Upload the chunks
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = fs.SizeSuffix(size) // remaining size in file for logging only, -1 if size < 0
position = fs.SizeSuffix(0) // position in file
memPool = o.fs.getMemoryPool(int64(partSize)) // pool to get memory from
finished = false // set when we have read EOF
blocks []string // list of blocks for finalize
binaryBlockID = make([]byte, 8) // block counter as LSB first 8 bytes
)
for part := 0; !finished; part++ {
// Get a block of memory from the pool and a token which limits concurrency
tokens.Get()
buf := memPool.Get()
free := func() {
memPool.Put(buf) // return the buf
tokens.Put() // return the token
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
free()
break
}
// Read the chunk
n, err := readers.ReadFill(in, buf) // this can never return 0, nil
if err == io.EOF {
if n == 0 { // end if no data
free()
break
}
finished = true
} else if err != nil {
free()
return fmt.Errorf("multipart upload failed to read source: %w", err)
}
buf = buf[:n]
// increment the blockID and save the blocks for finalize
increment(binaryBlockID)
blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
blocks = append(blocks, blockID)
// Transfer the chunk
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %d", part+1, totalParts, position, fs.SizeSuffix(size), len(buf))
g.Go(func() (err error) {
defer free()
// Upload the block, with MD5 for check
md5sum := md5.Sum(buf)
transactionalMD5 := md5sum[:]
err = o.fs.pacer.Call(func() (bool, error) {
bufferReader := bytes.NewReader(buf)
wrappedReader := wrap(bufferReader)
rs := readSeekCloser{wrappedReader, bufferReader}
options := blockblob.StageBlockOptions{
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
}
_, err = blb.StageBlock(ctx, blockID, &rs, &options)
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("multipart upload failed to upload part: %w", err)
}
return nil
})
// ready for next block
if size >= 0 {
remaining -= partSize
}
position += partSize
chunkWriter := &azChunkWriter{
chunkSize: int64(partSize),
size: size,
f: f,
ui: ui,
o: o,
}
err = g.Wait()
info = fs.ChunkWriterInfo{
ChunkSize: int64(partSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
fs.Debugf(o, "open chunk writer: started multipart upload")
return info, chunkWriter, nil
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) {
if chunkNumber < 0 {
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
return -1, err
}
// Upload the block, with MD5 for check
m := md5.New()
currentChunkSize, err := io.Copy(m, reader)
if err != nil {
return err
return -1, err
}
options := blockblob.CommitBlockListOptions{
Metadata: o.getMetadata(),
Tier: parseTier(o.fs.opt.AccessTier),
HTTPHeaders: httpHeaders,
// If no data read, don't write the chunk
if currentChunkSize == 0 {
return 0, nil
}
md5sum := m.Sum(nil)
transactionalMD5 := md5sum[:]
// Finalise the upload session
err = o.fs.pacer.Call(func() (bool, error) {
_, err := blb.CommitBlockList(ctx, blocks, &options)
return o.fs.shouldRetry(ctx, err)
// increment the blockID and save the blocks for finalize
increment(&w.binaryBlockID)
blockID := base64.StdEncoding.EncodeToString(w.binaryBlockID[:])
// Save the blockID for the commit
w.blocksMu.Lock()
w.blocks = append(w.blocks, azBlock{
chunkNumber: chunkNumber,
id: blockID,
})
w.blocksMu.Unlock()
err = w.f.pacer.Call(func() (bool, error) {
// rewind the reader on retry and after reading md5
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
options := blockblob.StageBlockOptions{
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
}
_, err = w.ui.blb.StageBlock(ctx, blockID, &readSeekCloser{Reader: reader, Seeker: reader}, &options)
if err != nil {
if chunkNumber <= 8 {
return w.f.shouldRetry(ctx, err)
}
// retry all chunks once have done the first few
return true, err
}
return false, nil
})
if err != nil {
return fmt.Errorf("multipart upload failed to finalize: %w", err)
return -1, fmt.Errorf("failed to upload chunk %d with %v bytes: %w", chunkNumber+1, currentChunkSize, err)
}
fs.Debugf(w.o, "multipart upload wrote chunk %d with %v bytes", chunkNumber+1, currentChunkSize)
return currentChunkSize, err
}
// Abort the multipart upload.
//
// FIXME it would be nice to delete uncommitted blocks.
//
// See: https://github.com/rclone/rclone/issues/5583
//
// However there doesn't seem to be an easy way of doing this other than
// by deleting the target.
//
// This means that a failed upload deletes the target which isn't ideal.
//
// Uploading a zero length blob and deleting it will remove the
// uncommitted blocks I think.
//
// Could check to see if a file exists already and if it doesn't then
// create a 0 length file and delete it to flush the uncommitted
// blocks.
//
// This is what azcopy does
// https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
func (w *azChunkWriter) Abort(ctx context.Context) error {
fs.Debugf(w.o, "multipart upload aborted (did nothing - see issue #5583)")
return nil
}
// Close and finalise the multipart upload
func (w *azChunkWriter) Close(ctx context.Context) (err error) {
// sort the completed parts by part number
sort.Slice(w.blocks, func(i, j int) bool {
return w.blocks[i].chunkNumber < w.blocks[j].chunkNumber
})
// Create a list of block IDs
blockIDs := make([]string, len(w.blocks))
for i := range w.blocks {
blockIDs[i] = w.blocks[i].id
}
options := blockblob.CommitBlockListOptions{
Metadata: w.o.getMetadata(),
Tier: parseTier(w.f.opt.AccessTier),
HTTPHeaders: &w.ui.httpHeaders,
}
// Finalise the upload session
err = w.f.pacer.Call(func() (bool, error) {
_, err := w.ui.blb.CommitBlockList(ctx, blockIDs, &options)
return w.f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to complete multipart upload: %w", err)
}
fs.Debugf(w.o, "multipart upload finished")
return err
}
var warnStreamUpload sync.Once
// uploadMultipart uploads a file using multipart upload
//
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (ui uploadInfo, err error) {
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
if err != nil {
return ui, err
}
return chunkWriter.(*azChunkWriter).ui, nil
}
// uploadSinglepart uploads a short blob using a single part upload
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, blb *blockblob.Client, httpHeaders *blob.HTTPHeaders) (err error) {
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, ui uploadInfo) (err error) {
chunkSize := int64(o.fs.opt.ChunkSize)
// fs.Debugf(o, "Single part upload starting of object %d bytes", size)
if size > o.fs.poolSize || size < 0 {
return fmt.Errorf("internal error: single part upload size too big %d > %d", size, o.fs.opt.ChunkSize)
if size > chunkSize || size < 0 {
return fmt.Errorf("internal error: single part upload size too big %d > %d", size, chunkSize)
}
buf := o.fs.pool.Get()
defer o.fs.pool.Put(buf)
rw := multipart.NewRW()
defer fs.CheckClose(rw, &err)
n, err := readers.ReadFill(in, buf)
if err == nil {
// Check to see whether in is exactly len(buf) or bigger
var buf2 = []byte{0}
n2, err2 := readers.ReadFill(in, buf2)
if n2 != 0 || err2 != io.EOF {
return fmt.Errorf("single part upload read failed: object longer than expected (expecting %d but got > %d)", size, len(buf))
}
}
n, err := io.CopyN(rw, in, size+1)
if err != nil && err != io.EOF {
return fmt.Errorf("single part upload read failed: %w", err)
}
if int64(n) != size {
if n != size {
return fmt.Errorf("single part upload: expecting to read %d bytes but read %d", size, n)
}
b := bytes.NewReader(buf[:n])
rs := &readSeekCloser{Reader: b, Seeker: b}
rs := &readSeekCloser{Reader: rw, Seeker: rw}
options := blockblob.UploadOptions{
Metadata: o.getMetadata(),
Tier: parseTier(o.fs.opt.AccessTier),
HTTPHeaders: httpHeaders,
HTTPHeaders: &ui.httpHeaders,
}
// Don't retry, return a retry error instead
return o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = blb.Upload(ctx, rs, &options)
return o.fs.pacer.Call(func() (bool, error) {
// rewind the reader on retry
_, err = rs.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
_, err = ui.blb.Upload(ctx, rs, &options)
return o.fs.shouldRetry(ctx, err)
})
}
// Info needed for an upload
type uploadInfo struct {
blb *blockblob.Client
httpHeaders blob.HTTPHeaders
isDirMarker bool
}
// Prepare the object for upload
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
container, containerPath := o.split()
if container == "" || containerPath == "" {
return ui, fmt.Errorf("can't upload to root - need a container")
}
// Create parent dir/bucket if not saving directory marker
metadataMu.Lock()
_, ui.isDirMarker = o.meta[dirMetaKey]
metadataMu.Unlock()
if !ui.isDirMarker {
err = o.fs.mkdirParent(ctx, o.remote)
if err != nil {
return ui, err
}
}
// Update Mod time
o.updateMetadataWithModTime(src.ModTime(ctx))
if err != nil {
return ui, err
}
// Create the HTTP headers for the upload
ui.httpHeaders = blob.HTTPHeaders{
BlobContentType: pString(fs.MimeType(ctx, src)),
}
// Compute the Content-MD5 of the file. As we stream all uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
if !o.fs.opt.DisableCheckSum {
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
ui.httpHeaders.BlobContentMD5 = sourceMD5bytes
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
}
}
}
// Apply upload options (also allows one to overwrite content-type)
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
ui.httpHeaders.BlobCacheControl = pString(value)
case "content-disposition":
ui.httpHeaders.BlobContentDisposition = pString(value)
case "content-encoding":
ui.httpHeaders.BlobContentEncoding = pString(value)
case "content-language":
ui.httpHeaders.BlobContentLanguage = pString(value)
case "content-type":
ui.httpHeaders.BlobContentType = pString(value)
}
}
ui.blb = o.fs.getBlockBlobSVC(container, containerPath)
return ui, nil
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
@@ -2211,80 +2317,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errCantUpdateArchiveTierBlobs
}
}
container, containerPath := o.split()
if container == "" || containerPath == "" {
return fmt.Errorf("can't upload to root - need a container")
}
// Create parent dir/bucket if not saving directory marker
_, isDirMarker := o.meta[dirMetaKey]
if !isDirMarker {
err = o.fs.mkdirParent(ctx, o.remote)
if err != nil {
return err
}
}
// Update Mod time
fs.Debugf(nil, "o.meta = %+v", o.meta)
o.updateMetadataWithModTime(src.ModTime(ctx))
if err != nil {
return err
}
// Create the HTTP headers for the upload
httpHeaders := blob.HTTPHeaders{
BlobContentType: pString(fs.MimeType(ctx, src)),
}
// Compute the Content-MD5 of the file. As we stream all uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
if !o.fs.opt.DisableCheckSum {
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
httpHeaders.BlobContentMD5 = sourceMD5bytes
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
}
}
}
// Apply upload options (also allows one to overwrite content-type)
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
httpHeaders.BlobCacheControl = pString(value)
case "content-disposition":
httpHeaders.BlobContentDisposition = pString(value)
case "content-encoding":
httpHeaders.BlobContentEncoding = pString(value)
case "content-language":
httpHeaders.BlobContentLanguage = pString(value)
case "content-type":
httpHeaders.BlobContentType = pString(value)
}
}
blb := o.fs.getBlockBlobSVC(container, containerPath)
size := src.Size()
multipartUpload := size < 0 || size > o.fs.poolSize
multipartUpload := size < 0 || size > int64(o.fs.opt.ChunkSize)
var ui uploadInfo
fs.Debugf(nil, "o.meta = %+v", o.meta)
if multipartUpload {
err = o.uploadMultipart(ctx, in, size, blb, &httpHeaders)
ui, err = o.uploadMultipart(ctx, in, src, options...)
} else {
err = o.uploadSinglepart(ctx, in, size, blb, &httpHeaders)
ui, err = o.prepareUpload(ctx, src, options)
if err != nil {
return fmt.Errorf("failed to prepare upload: %w", err)
}
err = o.uploadSinglepart(ctx, in, size, ui)
}
if err != nil {
return err
}
// Refresh metadata on object
if !isDirMarker {
if !ui.isDirMarker {
o.clearMetaData()
err = o.readMetaData(ctx)
if err != nil {
@@ -2373,13 +2425,14 @@ func parseTier(tier string) *blob.AccessTier {
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Purger = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Purger = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
)

View File

@@ -1,5 +1,5 @@
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
@@ -20,17 +20,18 @@ func (f *Fs) InternalTest(t *testing.T) {
func TestIncrement(t *testing.T) {
for _, test := range []struct {
in []byte
want []byte
in [8]byte
want [8]byte
}{
{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
{[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}},
{[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}},
{[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}},
{[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
} {
increment(test.in)
increment(&test.in)
assert.Equal(t, test.want, test.in)
}
}

View File

@@ -1,7 +1,7 @@
// Test AzureBlob filesystem interface
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
@@ -19,7 +19,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
TiersToTest: []string{"Hot", "Cool", "Cold"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
@@ -31,11 +31,11 @@ func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestAzureBlob:"
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
RemoteName: name,
RemoteName: name + ":",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
TiersToTest: []string{"Hot", "Cool", "Cold"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
@@ -62,6 +62,7 @@ func TestValidateAccessTier(t *testing.T) {
"HOT": {"HOT", true},
"Hot": {"Hot", true},
"cool": {"cool", true},
"cold": {"cold", true},
"archive": {"archive", true},
"empty": {"", false},
"unknown": {"unknown", false},

View File

@@ -1,7 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js || !go1.18
// +build plan9 solaris js !go1.18
//go:build plan9 || solaris || js
// +build plan9 solaris js
package azureblob

File diff suppressed because it is too large

View File

@@ -0,0 +1,70 @@
//go:build !plan9 && !js
// +build !plan9,!js
package azurefiles
import (
"context"
"math/rand"
"strings"
"testing"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Authentication", f.InternalTestAuth)
}
var _ fstests.InternalTester = (*Fs)(nil)
func (f *Fs) InternalTestAuth(t *testing.T) {
t.Skip("skipping since this requires authentication credentials which are not part of repo")
shareName := "test-rclone-oct-2023"
testCases := []struct {
name string
options *Options
}{
{
name: "ConnectionString",
options: &Options{
ShareName: shareName,
ConnectionString: "",
},
},
{
name: "AccountAndKey",
options: &Options{
ShareName: shareName,
Account: "",
Key: "",
}},
{
name: "SASUrl",
options: &Options{
ShareName: shareName,
SASURL: "",
}},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fs, err := newFsFromOptions(context.TODO(), "TestAzureFiles", "", tc.options)
assert.NoError(t, err)
dirName := randomString(10)
assert.NoError(t, fs.Mkdir(context.TODO(), dirName))
})
}
}
const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
func randomString(charCount int) string {
strBldr := strings.Builder{}
for i := 0; i < charCount; i++ {
randPos := rand.Int63n(52)
strBldr.WriteByte(chars[randPos])
}
return strBldr.String()
}

View File

@@ -0,0 +1,18 @@
//go:build !plan9 && !js
// +build !plan9,!js
package azurefiles
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
func TestIntegration(t *testing.T) {
var objPtr *Object
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureFiles:",
NilObject: objPtr,
})
}

View File

@@ -0,0 +1,7 @@
// Build for azurefiles for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
package azurefiles

View File

@@ -33,10 +33,18 @@ var _ fserrors.Fataler = (*Error)(nil)
// Bucket describes a B2 bucket
type Bucket struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}
// LifecycleRule is a single lifecycle rule
type LifecycleRule struct {
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
FileNamePrefix string `json:"fileNamePrefix"`
}
// Timestamp is a UTC time when this file was uploaded. It is a base
@@ -206,9 +214,10 @@ type FileInfo struct {
// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}
// DeleteBucketRequest is used to delete a bucket
@@ -331,3 +340,11 @@ type CopyPartRequest struct {
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
}
// UpdateBucketRequest describes a request to modify a B2 bucket
type UpdateBucketRequest struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Type string `json:"bucketType,omitempty"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}
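
A minimal sketch (not part of the diff) of how the new lifecycle fields might be used; exampleLifecycleRequests, the account/bucket IDs and the bucket name are placeholders, while the struct and field names come from the definitions above.

// Illustrative only: build a bucket-creation request carrying a lifecycle
// rule, and the equivalent update request for an existing bucket.
func exampleLifecycleRequests(accountID, bucketID string) (CreateBucketRequest, UpdateBucketRequest) {
	days := 30 // permanently delete hidden (deleted/overwritten) versions after 30 days
	rule := LifecycleRule{DaysFromHidingToDeleting: &days}
	create := CreateBucketRequest{
		AccountID:      accountID,
		Name:           "my-bucket",
		Type:           "allPrivate",
		LifecycleRules: []LifecycleRule{rule},
	}
	update := UpdateBucketRequest{
		ID:             bucketID,
		AccountID:      accountID,
		LifecycleRules: []LifecycleRule{rule},
	}
	return create, update
}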

View File

@@ -9,6 +9,7 @@ import (
"bytes"
"context"
"crypto/sha1"
"encoding/json"
"errors"
"fmt"
gohash "hash"
@@ -32,6 +33,7 @@ import (
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"
@@ -57,9 +59,7 @@ const (
minChunkSize = 5 * fs.Mebi
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
)
// Globals
@@ -74,14 +74,17 @@ func init() {
Name: "b2",
Description: "Backblaze B2",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID.",
Required: true,
Name: "account",
Help: "Account ID or Application Key ID.",
Required: true,
Sensitive: true,
}, {
Name: "key",
Help: "Application Key.",
Required: true,
Name: "key",
Help: "Application Key.",
Required: true,
Sensitive: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -147,6 +150,18 @@ might a maximum of "--transfers" chunks in progress at once.
5,000,000 Bytes is the minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
Note that chunks are stored in memory and there may be up to
"--transfers" * "--b2-upload-concurrency" chunks stored at once
in memory.`,
Default: 4,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files.
@@ -186,16 +201,41 @@ The minimum value is 1 second. The maximum value is one week.`,
Advanced: true,
}, {
Name: "memory_pool_flush_time",
Default: memoryPoolFlushTime,
Default: fs.Duration(time.Minute),
Advanced: true,
Help: `How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
Hide: fs.OptionHideBoth,
Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
}, {
Name: "memory_pool_use_mmap",
Default: memoryPoolUseMmap,
Default: false,
Advanced: true,
Hide: fs.OptionHideBoth,
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
}, {
Name: "lifecycle",
Help: `Set the number of days deleted files should be kept when creating a bucket.
On bucket creation, this parameter is used to create a lifecycle rule
for the entire bucket.
If lifecycle is 0 (the default) it does not create a lifecycle rule so
the default B2 behaviour applies. This is to create versions of files
on delete and overwrite and to keep them indefinitely.
If lifecycle is >0 then it creates a single rule setting the number of
days before a file that is deleted or overwritten is deleted
permanently. This is known as daysFromHidingToDeleting in the b2 docs.
The minimum value for this parameter is 1 day.
You can also enable hard_delete in the config, which will mean
deletions won't cause versions but overwrites will still cause
versions to be made.
See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket creation.
`,
Default: 0,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -222,11 +262,11 @@ type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Lifecycle int `config:"lifecycle"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -251,7 +291,6 @@ type Fs struct {
authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
}
// Object describes a b2 object
@@ -361,11 +400,18 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
// Decode error response
errResponse := new(api.Error)
err := rest.DecodeJSON(resp, &errResponse)
body, err := rest.ReadBody(resp)
if err != nil {
fs.Debugf(nil, "Couldn't decode error response: %v", err)
fs.Errorf(nil, "Couldn't read error out of body: %v", err)
body = nil
}
// Decode error response if there was one - they can be blank
errResponse := new(api.Error)
if len(body) > 0 {
err = json.Unmarshal(body, errResponse)
if err != nil {
fs.Errorf(nil, "Couldn't decode error response: %v", err)
}
}
if errResponse.Code == "" {
errResponse.Code = "unknown"
@@ -409,6 +455,14 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}
func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
}
return
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
@@ -456,19 +510,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
ci.Transfers,
opt.MemoryPoolUseMmap,
),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
ChunkWriterDoesntSeek: true,
}).Fill(ctx, f)
// Set the test flag if required
if opt.TestMode != "" {
@@ -595,23 +644,24 @@ func (f *Fs) clearUploadURL(bucketID string) {
f.uploadMu.Unlock()
}
// getBuf gets a buffer of f.opt.ChunkSize and an upload token
// getRW gets a RW buffer and an upload token
//
// If noBuf is set then it just gets an upload token
func (f *Fs) getBuf(noBuf bool) (buf []byte) {
func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
f.uploadToken.Get()
if !noBuf {
buf = f.pool.Get()
rw = multipart.NewRW()
}
return buf
return rw
}
// putBuf returns a buffer to the memory pool and an upload token
// putRW returns a RW buffer to the memory pool and returns an upload
// token
//
// If noBuf is set then it just returns the upload token
func (f *Fs) putBuf(buf []byte, noBuf bool) {
if !noBuf {
f.pool.Put(buf)
// If rw is nil then it just returns the upload token
func (f *Fs) putRW(rw *pool.RW) {
if rw != nil {
_ = rw.Close()
}
f.uploadToken.Put()
}
@@ -818,7 +868,7 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
// listBuckets returns all the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
err = f.listBucketsToFn(ctx, "", func(bucket *api.Bucket) error {
d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d)
return nil
@@ -911,11 +961,14 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
var account = api.ListBucketsRequest{
AccountID: f.info.AccountID,
BucketID: f.info.Allowed.BucketID,
}
if bucketName != "" && account.BucketID == "" {
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
}
var response api.ListBucketsResponse
opts := rest.Opts{
@@ -961,7 +1014,7 @@ func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType strin
if bucketType != "" {
return bucketType, nil
}
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
// listBucketsToFn reads bucket Types
return nil
})
@@ -996,7 +1049,7 @@ func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, e
if bucketID != "" {
return bucketID, nil
}
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
// listBucketsToFn sets IDs
return nil
})
@@ -1060,6 +1113,11 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
Name: f.opt.Enc.FromStandardName(bucket),
Type: "allPrivate",
}
if f.opt.Lifecycle > 0 {
request.LifecycleRules = []api.LifecycleRule{{
DaysFromHidingToDeleting: &f.opt.Lifecycle,
}}
}
var response api.Bucket
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
@@ -1280,7 +1338,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
// If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
if srcObj.size >= int64(f.opt.CopyCutoff) {
if srcObj.size > int64(f.opt.CopyCutoff) {
if newInfo == nil {
newInfo, err = srcObj.getMetaData(ctx)
if err != nil {
@@ -1291,7 +1349,11 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
if err != nil {
return err
}
return up.Upload(ctx)
err = up.Copy(ctx)
if err != nil {
return err
}
return dstObj.decodeMetaDataFileInfo(up.info)
}
dstBucket, dstPath := dstObj.split()
@@ -1420,7 +1482,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
if err != nil {
return "", err
}
absPath := "/" + bucketPath
absPath := "/" + urlEncode(bucketPath)
link = RootURL + "/file/" + urlEncode(bucket) + absPath
bucketType, err := f.getbucketType(ctx, bucket)
if err != nil {
@@ -1859,11 +1921,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
if size == -1 {
if size < 0 {
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
buf := o.fs.getBuf(false)
rw := o.fs.getRW(false)
n, err := io.ReadFull(in, buf)
n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize))
if err == nil {
bufReader := bufio.NewReader(in)
in = bufReader
@@ -1874,26 +1936,34 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
if err != nil {
o.fs.putBuf(buf, false)
o.fs.putRW(rw)
return err
}
// NB Stream returns the buffer and token
return up.Stream(ctx, buf)
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
err = up.Stream(ctx, rw)
if err != nil {
return err
}
return o.decodeMetaDataFileInfo(up.info)
} else if err == io.EOF {
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
defer o.fs.putBuf(buf, false)
size = int64(n)
in = bytes.NewReader(buf[:n])
defer o.fs.putRW(rw)
size = n
in = rw
} else {
o.fs.putBuf(buf, false)
o.fs.putRW(rw)
return err
}
} else if size > int64(o.fs.opt.UploadCutoff) {
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
if err != nil {
return err
}
return up.Upload(ctx)
up := chunkWriter.(*largeUpload)
return o.decodeMetaDataFileInfo(up.info)
}
modTime := src.ModTime(ctx)
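
A minimal sketch (not part of the diff) summarising how Update now routes an upload by size; chooseUploadPath is an illustrative name and the thresholds correspond to --b2-upload-cutoff and --b2-chunk-size.

// Illustrative only: the three upload paths Update picks between.
func chooseUploadPath(size, uploadCutoff int64) string {
	switch {
	case size < 0:
		// Unknown size: buffer up to one chunk into a pool.RW. If the source
		// fits in that chunk it is sent as a single direct upload, otherwise
		// the buffered chunk seeds a chunked streaming upload via Stream.
		return "stream or direct"
	case size > uploadCutoff:
		// Large file with known size: multipart upload driven by
		// multipart.UploadMultipart through the OpenChunkWriter interface.
		return "multipart"
	default:
		// Small file with known size: a single upload call.
		return "direct"
	}
}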
@@ -2001,6 +2071,41 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.decodeMetaDataFileInfo(&response)
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// FIXME what if file is smaller than 1 chunk?
if f.opt.Versions {
return info, nil, errNotWithVersions
}
if f.opt.VersionAt.IsSet() {
return info, nil, errNotWithVersionAt
}
//size := src.Size()
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
bucket, _ := o.split()
err = f.makeBucket(ctx, bucket)
if err != nil {
return info, nil, err
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(f.opt.ChunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
return info, up, err
}
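
A minimal sketch (not part of the diff) of the calling convention this enables: open a chunk writer, write each chunk by number, then Close on success or Abort on failure. exampleChunkedUpload and its chunks argument are hypothetical; in rclone the real driver is lib/multipart as used in Update above.

// Illustrative only; error handling is minimal and the chunk slice is a
// stand-in for however the caller splits its input.
func exampleChunkedUpload(ctx context.Context, f *Fs, remote string, src fs.ObjectInfo, chunks []io.ReadSeeker) error {
	info, writer, err := f.OpenChunkWriter(ctx, remote, src)
	if err != nil {
		return err
	}
	fs.Debugf(f, "uploading in chunks of %d bytes", info.ChunkSize)
	for n, chunk := range chunks {
		if _, err := writer.WriteChunk(ctx, n, chunk); err != nil {
			_ = writer.Abort(ctx) // cancel the pending large file on failure
			return err
		}
	}
	return writer.Close(ctx) // finalise the large file from the uploaded parts
}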
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split()
@@ -2026,16 +2131,149 @@ func (o *Object) ID() string {
return o.id
}
var lifecycleHelp = fs.CommandHelp{
Name: "lifecycle",
Short: "Read or set the lifecycle for a bucket",
Long: `This command can be used to read or set the lifecycle for a bucket.
Usage Examples:
To show the current lifecycle rules:
rclone backend lifecycle b2:bucket
This will dump something like this showing the lifecycle rules.
[
{
"daysFromHidingToDeleting": 1,
"daysFromUploadingToHiding": null,
"fileNamePrefix": ""
}
]
If there are no lifecycle rules (the default) then it will just return [].
To reset the current lifecycle rules:
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
This will run and then print the new lifecycle rules as above.
Rclone only lets you set lifecycles for the whole bucket with the
fileNamePrefix = "".
You can't disable versioning with B2. The best you can do is to set
the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
the config also which will mean deletions won't cause versions but
overwrites will still cause versions to be made.
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
Opts: map[string]string{
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
},
}
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
var newRule api.LifecycleRule
if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromHidingToDeleting: %w", err)
}
newRule.DaysFromHidingToDeleting = &days
}
if daysStr := opt["daysFromUploadingToHiding"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromUploadingToHiding: %w", err)
}
newRule.DaysFromUploadingToHiding = &days
}
bucketName, _ := f.split("")
if bucketName == "" {
return nil, errors.New("bucket required")
}
var bucket *api.Bucket
if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
bucketID, err := f.getBucketID(ctx, bucketName)
if err != nil {
return nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_update_bucket",
}
var request = api.UpdateBucketRequest{
ID: bucketID,
AccountID: f.info.AccountID,
LifecycleRules: []api.LifecycleRule{newRule},
}
var response api.Bucket
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
bucket = &response
} else {
err = f.listBucketsToFn(ctx, bucketName, func(b *api.Bucket) error {
bucket = b
return nil
})
if err != nil {
return nil, err
}
}
if bucket == nil {
return nil, fs.ErrorDirNotFound
}
return bucket.LifecycleRules, nil
}
var commandHelp = []fs.CommandHelp{
lifecycleHelp,
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "lifecycle":
return f.lifecycleCommand(ctx, name, arg, opt)
default:
return nil, fs.ErrorCommandNotFound
}
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Commander = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)

View File

@@ -5,6 +5,7 @@ import (
"time"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// Test b2 string encoding
@@ -168,3 +169,10 @@ func TestParseTimeString(t *testing.T) {
}
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
// Internal tests go here
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -28,7 +28,12 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)

View File

@@ -5,7 +5,6 @@
package b2
import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
@@ -14,7 +13,6 @@ import (
"io"
"strings"
"sync"
"time"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
@@ -80,12 +78,14 @@ type largeUpload struct {
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int64 // calculated number of parts, if known
parts int // calculated number of parts, if known
sha1smu sync.Mutex // mutex to protect sha1s
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
chunkSize int64 // chunk size to use
src *Object // if copying, object we are reading from
info *api.FileInfo // final response with info about the object
}
// newLargeUpload starts an upload of object o from in with metadata in src
@@ -93,18 +93,16 @@ type largeUpload struct {
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
size := src.Size()
parts := int64(0)
sha1SliceSize := int64(maxParts)
parts := 0
chunkSize := defaultChunkSize
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
parts = size / int64(chunkSize)
parts = int(size / int64(chunkSize))
if size%int64(chunkSize) != 0 {
parts++
}
sha1SliceSize = parts
}
opts := rest.Opts{
@@ -152,7 +150,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, sha1SliceSize),
sha1s: make([]string, 0, 16),
chunkSize: int64(chunkSize),
}
// unwrap the accounting from the input, we use wrap to put it
@@ -171,24 +169,26 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
} else {
if len(up.uploads) > 0 {
upload, up.uploads = up.uploads[0], up.uploads[1:]
up.uploadMu.Unlock()
return upload, nil
}
up.uploadMu.Unlock()
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err = up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
return upload, nil
}
@@ -203,10 +203,39 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock()
}
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Add an sha1 to the being built up sha1s
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
up.sha1smu.Lock()
defer up.sha1smu.Unlock()
if len(up.sha1s) < chunkNumber+1 {
up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
}
up.sha1s[chunkNumber] = sha1
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(1)
}
err = up.f.pacer.Call(func() (bool, error) {
// Discover the size by seeking to the end
size, err = reader.Seek(0, io.SeekEnd)
if err != nil {
return false, err
}
// rewind the reader on retry and after reading size
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
// Get upload URL
upload, err := up.getUploadURL(ctx)
@@ -214,8 +243,8 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
return false, err
}
in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
size := int64(len(body)) + int64(in.AdditionalLength())
in := newHashAppendingReader(reader, sha1.New())
sizeWithHash := size + int64(in.AdditionalLength())
// Authorization
//
@@ -245,10 +274,10 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
Body: up.wrap(in),
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
sha1Header: "hex_digits_at_end",
},
ContentLength: &size,
ContentLength: &sizeWithHash,
}
var response api.UploadPartResponse
@@ -256,7 +285,7 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
}
// On retryable error clear PartUploadURL
if retry {
@@ -264,30 +293,30 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
upload = nil
}
up.returnUploadURL(upload)
up.sha1s[part-1] = in.HexSum()
up.addSha1(chunkNumber, in.HexSum())
return retry, err
})
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
} else {
fs.Debugf(up.o, "Done sending chunk %d", part)
fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
}
return err
return size, err
}
// Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_part",
}
offset := (part - 1) * up.chunkSize // where we are in the source file
offset := int64(part) * up.chunkSize // where we are in the source file
var request = api.CopyPartRequest{
SourceID: up.src.id,
LargeFileID: up.id,
PartNumber: part,
PartNumber: int64(part + 1),
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
}
var response api.UploadPartResponse
@@ -296,7 +325,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64
if err != nil {
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
up.sha1s[part-1] = response.SHA1
up.addSha1(part, response.SHA1)
return retry, err
})
if err != nil {
@@ -307,8 +336,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64
return err
}
// finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error {
// Close closes off the large upload
func (up *largeUpload) Close(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
opts := rest.Opts{
Method: "POST",
@@ -326,11 +355,12 @@ func (up *largeUpload) finish(ctx context.Context) error {
if err != nil {
return err
}
return up.o.decodeMetaDataFileInfo(&response)
up.info = &response
return nil
}
// cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error {
// Abort aborts the large upload
func (up *largeUpload) Abort(ctx context.Context) error {
fs.Debugf(up.o, "Cancelling large file %s", up.what)
opts := rest.Opts{
Method: "POST",
@@ -355,157 +385,105 @@ func (up *largeUpload) cancel(ctx context.Context) error {
// reaches EOF.
//
// Note that initialUploadBlock must be returned to f.putBuf()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
hasMoreParts = true
)
up.size = int64(len(initialUploadBlock))
g.Go(func() error {
for part := int64(1); hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var buf []byte
if part == 1 {
buf = initialUploadBlock
} else {
buf = up.f.getBuf(false)
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, false)
return nil
}
// Read the chunk
var n int
if part == 1 {
n = len(buf)
} else {
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putBuf(buf, false)
return nil
} else if err != nil {
// other kinds of errors indicate failure
up.f.putBuf(buf, false)
return err
}
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, false)
return up.transferChunk(gCtx, part, buf)
})
up.size = initialUploadBlock.Size()
up.parts = 0
for part := 0; hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var rw *pool.RW
if part == 0 {
rw = initialUploadBlock
} else {
rw = up.f.getRW(false)
}
return nil
})
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putRW(rw)
break
}
// Read the chunk
var n int64
if part == 0 {
n = rw.Size()
} else {
n, err = io.CopyN(rw, up.in, up.chunkSize)
if err == io.EOF {
if n == 0 {
fs.Debugf(up.o, "Not sending empty chunk after EOF - ending.")
up.f.putRW(rw)
break
} else {
fs.Debugf(up.o, "Read less than a full chunk %d, making this the last one.", n)
}
hasMoreParts = false
} else if err != nil {
// other kinds of errors indicate failure
up.f.putRW(rw)
return err
}
}
// Keep stats up to date
up.parts += 1
up.size += n
if part > maxParts {
up.f.putRW(rw)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putRW(rw)
_, err = up.WriteChunk(gCtx, part, rw)
return err
})
}
err = g.Wait()
if err != nil {
return err
}
up.sha1s = up.sha1s[:up.parts]
return up.finish(ctx)
return up.Close(ctx)
}
// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
// Copy the chunks from the source to the destination
func (up *largeUpload) Copy(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
uploadPool *pool.Pool
ci = fs.GetConfig(ctx)
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
)
// If using large chunk size then make a temporary pool
if up.chunkSize <= int64(up.f.opt.ChunkSize) {
uploadPool = up.f.pool
} else {
uploadPool = pool.New(
time.Duration(up.f.opt.MemoryPoolFlushTime),
int(up.chunkSize),
ci.Transfers,
up.f.opt.MemoryPoolUseMmap,
)
defer uploadPool.Flush()
}
// Get an upload token and a buffer
getBuf := func() (buf []byte) {
up.f.getBuf(true)
if !up.doCopy {
buf = uploadPool.Get()
g.SetLimit(up.f.opt.UploadConcurrency)
for part := 0; part < up.parts; part++ {
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in copying all the other parts.
if gCtx.Err() != nil {
break
}
return buf
}
// Put an upload token and a buffer
putBuf := func(buf []byte) {
if !up.doCopy {
uploadPool.Put(buf)
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
up.f.putBuf(nil, true)
part := part // for the closure
g.Go(func() (err error) {
return up.copyChunk(gCtx, part, reqSize)
})
remaining -= reqSize
}
g.Go(func() error {
for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := getBuf()
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
putBuf(buf)
return nil
}
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
if !up.doCopy {
// Read the chunk
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
putBuf(buf)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer putBuf(buf)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
err = up.copyChunk(gCtx, part, reqSize)
}
return err
})
remaining -= reqSize
}
return nil
})
err = g.Wait()
if err != nil {
return err
}
return up.finish(ctx)
return up.Close(ctx)
}
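
A minimal, generic sketch (not part of the diff) of the bounded-concurrency pattern Stream and Copy now share, with errgroup.SetLimit replacing the old memory-pool and token plumbing; runChunksConcurrently and doChunk are illustrative names standing in for WriteChunk/copyChunk.

// Illustrative only: run parts 0..parts-1 with at most `concurrency` in flight.
func runChunksConcurrently(ctx context.Context, parts, concurrency int, doChunk func(ctx context.Context, part int) error) error {
	g, gCtx := errgroup.WithContext(ctx)
	g.SetLimit(concurrency) // cap the number of chunk operations running at once
	for part := 0; part < parts; part++ {
		if gCtx.Err() != nil { // fail fast once any chunk has errored
			break
		}
		part := part // capture for the closure
		g.Go(func() error {
			return doChunk(gCtx, part)
		})
	}
	return g.Wait()
}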

View File

@@ -52,7 +52,7 @@ func (e *Error) Error() string {
out += ": " + e.Message
}
if e.ContextInfo != nil {
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
out += fmt.Sprintf(" (%s)", string(e.ContextInfo))
}
return out
}
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
// Types of things in Item
// Types of things in Item/ItemMini
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
@@ -72,20 +72,31 @@ const (
ItemStatusDeleted = "deleted"
)
// ItemMini is a subset of the elements in a full Item returned by some API calls
type ItemMini struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
}
// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
Parent ItemMini `json:"parent"`
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
@@ -156,19 +167,7 @@ type PreUploadCheckResponse struct {
// PreUploadCheckConflict is returned in the ContextInfo error field
// from PreUploadCheck when the error code is "item_name_in_use"
type PreUploadCheckConflict struct {
Conflicts struct {
Type string `json:"type"`
ID string `json:"id"`
FileVersion struct {
Type string `json:"type"`
ID string `json:"id"`
Sha1 string `json:"sha1"`
} `json:"file_version"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
Sha1 string `json:"sha1"`
Name string `json:"name"`
} `json:"conflicts"`
Conflicts ItemMini `json:"conflicts"`
}
// UpdateFileModTime is used in Update File Info
@@ -281,3 +280,30 @@ type User struct {
Address string `json:"address"`
AvatarURL string `json:"avatar_url"`
}
// FileTreeChangeEventTypes are the events that can require cache invalidation
var FileTreeChangeEventTypes = map[string]struct{}{
"ITEM_COPY": {},
"ITEM_CREATE": {},
"ITEM_MAKE_CURRENT_VERSION": {},
"ITEM_MODIFY": {},
"ITEM_MOVE": {},
"ITEM_RENAME": {},
"ITEM_TRASH": {},
"ITEM_UNDELETE_VIA_TRASH": {},
"ITEM_UPLOAD": {},
}
// Event is an array element in the response returned from /events
type Event struct {
EventType string `json:"event_type"`
EventID string `json:"event_id"`
Source Item `json:"source"`
}
// Events is returned from /events
type Events struct {
ChunkSize int64 `json:"chunk_size"`
Entries []Event `json:"entries"`
NextStreamPosition int64 `json:"next_stream_position"`
}
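
A minimal sketch (not part of the diff) of decoding an /events page into these types, assuming encoding/json is imported; exampleDecodeEvents and the JSON payload are made up for illustration.

// Illustrative only: unmarshal a hand-written events page and pick out the
// entries that can require cache invalidation.
func exampleDecodeEvents() (nextStreamPosition int64, err error) {
	data := []byte(`{
		"chunk_size": 1,
		"next_stream_position": 1152923100,
		"entries": [{
			"event_type": "ITEM_UPLOAD",
			"event_id": "evt-123",
			"source": {"type": "file", "id": "42", "sequence_id": "3", "name": "report.txt"}
		}]
	}`)
	var events Events
	if err := json.Unmarshal(data, &events); err != nil {
		return 0, err
	}
	for _, e := range events.Entries {
		if _, found := FileTreeChangeEventTypes[e.EventType]; found {
			// this event may require invalidating cached state for e.Source.ID
			_ = e.Source.Name
		}
	}
	return events.NextStreamPosition, nil
}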

View File

@@ -77,7 +77,7 @@ var (
)
type boxCustomClaims struct {
jwt.RegisteredClaims
jwt.StandardClaims
BoxSubType string `json:"box_sub_type,omitempty"`
}
@@ -107,16 +107,18 @@ func init() {
return nil, nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
Advanced: true,
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
Advanced: true,
Sensitive: true,
}, {
Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
Sensitive: true,
}, {
Name: "box_sub_type",
Default: "user",
@@ -147,6 +149,23 @@ func init() {
Default: "",
Help: "Only show items owned by the login (email address) passed in.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user ID when using a service account.
Setting this flag allows rclone, when using a JWT service account, to
act on behalf of another user by setting the as-user header.
The user ID is the Box identifier for a user. User IDs can be found for
any user via the GET /users endpoint, which is only available to
admins, or by calling the GET /users/me endpoint with an authenticated
user session.
See: https://developer.box.com/guides/authentication/jwt/as-user/
`,
Advanced: true,
Sensitive: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -206,12 +225,14 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
}
claims = &boxCustomClaims{
RegisteredClaims: jwt.RegisteredClaims{
ID: val,
//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
StandardClaims: jwt.StandardClaims{
Id: val,
Issuer: boxConfig.BoxAppSettings.ClientID,
Subject: boxConfig.EnterpriseID,
Audience: jwt.ClaimStrings{tokenURL},
ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Second * 45)),
Audience: tokenURL,
ExpiresAt: time.Now().Add(time.Second * 45).Unix(),
},
BoxSubType: boxSubType,
}
@@ -258,19 +279,29 @@ type Options struct {
AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"`
Impersonate string `config:"impersonate"`
}
// ItemMeta defines metadata we cache for each Item ID
type ItemMeta struct {
SequenceID int64 // the most recent event processed for this item
ParentID string // ID of the parent directory of this item
Name string // leaf name of this item
}
// Fs represents a remote box
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
itemMetaCacheMu *sync.Mutex // protects itemMetaCache
itemMetaCache map[string]ItemMeta // map of Item ID to selected metadata
}
// Object describes a box object
@@ -349,7 +380,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
// defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
if err != nil {
if err == fs.ErrorDirNotFound {
@@ -358,20 +389,30 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}
found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
if strings.EqualFold(item.Name, leaf) {
info = item
return true
}
return false
// Use preupload to find the ID
itemMini, err := f.preUploadCheck(ctx, leaf, directoryID, -1)
if err != nil {
return nil, err
}
if itemMini == nil {
return nil, fs.ErrorObjectNotFound
}
// Now we have the ID we can look up the object proper
opts := rest.Opts{
Method: "GET",
Path: "/files/" + itemMini.ID,
Parameters: fieldsValue(),
}
var item api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &item)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
if !found {
return nil, fs.ErrorObjectNotFound
}
return info, nil
return &item, nil
}
// errorHandler parses a non 2xx error response into an error
@@ -418,12 +459,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
itemMetaCacheMu: new(sync.Mutex),
itemMetaCache: make(map[string]ItemMeta),
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -436,6 +479,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
}
// If using impersonate set an as-user header
if f.opt.Impersonate != "" {
f.srv.SetHeader("as-user", f.opt.Impersonate)
}
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
@@ -678,6 +726,17 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = append(entries, o)
}
// Cache some metadata for this Item to help us process events later
// on. In particular, the box event API does not provide the old path
// of the Item when it is renamed/deleted/moved/etc.
f.itemMetaCacheMu.Lock()
cachedItemMeta, found := f.itemMetaCache[info.ID]
if !found || cachedItemMeta.SequenceID < info.SequenceID {
f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
}
f.itemMetaCacheMu.Unlock()
return false
})
if err != nil {
@@ -713,7 +772,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
//
// It returns "", nil if the file is good to go
// It returns "ID", nil if the file must be updated
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (item *api.ItemMini, err error) {
check := api.PreUploadCheck{
Name: f.opt.Enc.FromStandardName(leaf),
Parent: api.Parent{
@@ -738,16 +797,16 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
var conflict api.PreUploadCheckConflict
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
if err != nil {
return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
return nil, fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
}
if conflict.Conflicts.Type != api.ItemTypeFile {
return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
return nil, fs.ErrorIsDir
}
return conflict.Conflicts.ID, nil
return &conflict.Conflicts, nil
}
return "", fmt.Errorf("pre-upload check: %w", err)
return nil, fmt.Errorf("pre-upload check: %w", err)
}
return "", nil
return nil, nil
}
// Put the object
@@ -768,11 +827,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// Preflight check the upload, which returns the ID if the
// object already exists
ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
if err != nil {
return nil, err
}
if ID == "" {
if item == nil {
return f.PutUnchecked(ctx, in, src, options...)
}
@@ -780,7 +839,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
o := &Object{
fs: f,
remote: remote,
id: ID,
id: item.ID,
}
return o, o.Update(ctx, in, src, options...)
}
@@ -1117,7 +1176,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
var (
deleteErrors = int64(0)
deleteErrors atomic.Uint64
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
wg sync.WaitGroup
)
@@ -1133,7 +1192,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
atomic.AddInt64(&deleteErrors, 1)
deleteErrors.Add(1)
}
}()
} else {
@@ -1142,12 +1201,277 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return false
})
wg.Wait()
if deleteErrors != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors)
if deleteErrors.Load() != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load())
}
return err
}
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the `stream_position` early so all changes from now on get processed
streamPosition, err := f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
}
// box can send duplicate Event IDs. Use this map to track and filter
// the ones we've already processed.
processedEventIDs := make(map[string]time.Time)
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
if pollInterval != 0 {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
if streamPosition == "" {
streamPosition, err = f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
continue
}
}
// Garbage collect EventIDs older than 1 minute
for eventID, timestamp := range processedEventIDs {
if time.Since(timestamp) > time.Minute {
delete(processedEventIDs, eventID)
}
}
streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition, processedEventIDs)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
}
}
}()
}
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/events",
Parameters: fieldsValue(),
}
opts.Parameters.Set("stream_position", "now")
opts.Parameters.Set("stream_type", "changes")
var result api.Events
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
}
return strconv.FormatInt(result.NextStreamPosition, 10), nil
}
// Attempts to construct the full path for an object, given the ID of its
// parent directory and the name of the object.
//
// Can return "" if the parentID is not currently in the directory cache.
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
fullPath = ""
name := f.opt.Enc.ToStandardName(childName)
if parentID != "" {
if parentDir, ok := f.dirCache.GetInv(parentID); ok {
if len(parentDir) > 0 {
fullPath = parentDir + "/" + name
} else {
fullPath = name
}
}
} else {
// No parent, this object is at the root
fullPath = name
}
return fullPath
}
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string, processedEventIDs map[string]time.Time) (nextStreamPosition string, err error) {
nextStreamPosition = streamPosition
for {
limit := f.opt.ListChunk
// box only allows a max of 500 events
if limit > 500 {
limit = 500
}
opts := rest.Opts{
Method: "GET",
Path: "/events",
Parameters: fieldsValue(),
}
opts.Parameters.Set("stream_position", nextStreamPosition)
opts.Parameters.Set("stream_type", "changes")
opts.Parameters.Set("limit", strconv.Itoa(limit))
var result api.Events
var resp *http.Response
fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
}
if result.ChunkSize != int64(len(result.Entries)) {
return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
}
nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
if result.ChunkSize == 0 {
return nextStreamPosition, nil
}
type pathToClear struct {
path string
entryType fs.EntryType
}
var pathsToClear []pathToClear
newEventIDs := 0
for _, entry := range result.Entries {
eventDetails := fmt.Sprintf("[%q(%d)|%s|%s|%s|%s]", entry.Source.Name, entry.Source.SequenceID,
entry.Source.Type, entry.EventType, entry.Source.ID, entry.EventID)
if entry.EventID == "" {
fs.Debugf(f, "%s ignored due to missing EventID", eventDetails)
continue
}
if _, ok := processedEventIDs[entry.EventID]; ok {
fs.Debugf(f, "%s ignored due to duplicate EventID", eventDetails)
continue
}
processedEventIDs[entry.EventID] = time.Now()
newEventIDs++
if entry.Source.ID == "" { // missing File or Folder ID
fs.Debugf(f, "%s ignored due to missing SourceID", eventDetails)
continue
}
if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
fs.Debugf(f, "%s ignored due to unsupported SourceType", eventDetails)
continue
}
// Only interested in event types that result in a file tree change
if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
fs.Debugf(f, "%s ignored due to unsupported EventType", eventDetails)
continue
}
f.itemMetaCacheMu.Lock()
itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
if cachedItemMetaFound {
if itemMeta.SequenceID >= entry.Source.SequenceID {
// Item in the cache has the same or newer SequenceID than
// this event. Ignore this event, it must be old.
f.itemMetaCacheMu.Unlock()
fs.Debugf(f, "%s ignored due to old SequenceID (%q)", eventDetails, itemMeta.SequenceID)
continue
}
// This event is newer. Delete its entry from the cache,
// we'll notify about its change below, then it's up to a
// future list operation to repopulate the cache.
delete(f.itemMetaCache, entry.Source.ID)
}
f.itemMetaCacheMu.Unlock()
entryType := fs.EntryDirectory
if entry.Source.Type == api.ItemTypeFile {
entryType = fs.EntryObject
}
// The box event only includes the new path for the object (e.g.
// the path after the object was moved). If there was an old path
// saved in our cache, it must be cleared.
if cachedItemMetaFound {
path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
if path != "" {
fs.Debugf(f, "%s added old path (%q) for notify", eventDetails, path)
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
} else {
fs.Debugf(f, "%s old parent not cached", eventDetails)
}
// If this is a directory, also delete it from the dir cache.
// This will effectively invalidate the item metadata cache
// entries for all descendants of this directory, since we
// will no longer be able to construct a full path for them.
// This is exactly what we want, since we don't want to notify
// on the paths of these descendants if one of their ancestors
// has been renamed/deleted.
if entry.Source.Type == api.ItemTypeFolder {
f.dirCache.FlushDir(path)
}
}
// If the item is "active", then it is not trashed or deleted, so
// it potentially has a valid parent.
//
// Construct the new path of the object, based on the Parent ID
// and its name. If we get an empty result, it means we don't
// currently know about this object so notification is unnecessary.
if entry.Source.ItemStatus == api.ItemStatusActive {
path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
if path != "" {
fs.Debugf(f, "%s added new path (%q) for notify", eventDetails, path)
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
} else {
fs.Debugf(f, "%s new parent not found", eventDetails)
}
}
}
// box can sometimes repeatedly return the same Event IDs within a
// short period of time. If it stops giving us new ones, treat it
// the same as if it returned us none at all.
if newEventIDs == 0 {
return nextStreamPosition, nil
}
notifiedPaths := make(map[string]bool)
for _, p := range pathsToClear {
if _, ok := notifiedPaths[p.path]; ok {
continue
}
notifiedPaths[p.path] = true
notifyFunc(p.path, p.entryType)
}
fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
}
}
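
A minimal standalone sketch (not part of the diff) of the expiring event-ID de-duplication used above; dedupeEventIDs and its arguments are illustrative names.

// Illustrative only: remember processed event IDs with a timestamp and
// forget them again after a minute so the map cannot grow without bound.
func dedupeEventIDs(seen map[string]time.Time, incoming []string) (fresh []string) {
	now := time.Now()
	// forget IDs processed more than a minute ago
	for id, when := range seen {
		if now.Sub(when) > time.Minute {
			delete(seen, id)
		}
	}
	for _, id := range incoming {
		if _, dup := seen[id]; dup {
			continue // this event ID was already delivered - skip it
		}
		seen[id] = now
		fresh = append(fresh, id)
	}
	return fresh
}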
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {

View File

@@ -76,17 +76,19 @@ func init() {
Name: "plex_url",
Help: "The URL of the Plex server.",
}, {
Name: "plex_username",
Help: "The username of the Plex user.",
Name: "plex_username",
Help: "The username of the Plex user.",
Sensitive: true,
}, {
Name: "plex_password",
Help: "The password of the Plex user.",
IsPassword: true,
}, {
Name: "plex_token",
Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth,
Advanced: true,
Name: "plex_token",
Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth,
Advanced: true,
Sensitive: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server.",

View File

@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})

View File

@@ -160,11 +160,11 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
minSize := 5242880
maxSize := 10485760
totalFiles := 10
rand.Seed(time.Now().Unix())
randInstance := rand.New(rand.NewSource(time.Now().Unix()))
lastFile := ""
for i := 0; i < totalFiles; i++ {
size := int64(rand.Intn(maxSize-minSize) + minSize)
size := int64(randInstance.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)


@@ -40,6 +40,7 @@ func TestIntegration(t *testing.T) {
UnimplementableFsMethods: []string{
"PublicLink",
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"UserInfo",


@@ -914,7 +914,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return do(ctx, uRemote, expire, unlink)
}
// Put in to the remote path with the modTime given of the given size
// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return


@@ -11,7 +11,7 @@ import (
)
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
unimplementableObjectMethods = []string{}
)


@@ -257,6 +257,16 @@ func isMetadataFile(filename string) bool {
return strings.HasSuffix(filename, metaFileExt)
}
// Checks whether a file is a metadata file and returns the original
// file name and a flag indicating whether it was a metadata file or
// not.
func unwrapMetadataFile(filename string) (string, bool) {
if !isMetadataFile(filename) {
return "", false
}
return filename[:len(filename)-len(metaFileExt)], true
}
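
A minimal standalone sketch of the suffix-stripping helper added above. The metadata extension value below is an assumption for illustration; the real constant lives elsewhere in the compress backend:

```
package main

import (
	"fmt"
	"strings"
)

// metaFileExt is assumed here for illustration; the compress backend
// defines the real metadata suffix elsewhere.
const metaFileExt = ".json"

// unwrapName strips the metadata suffix, returning the original name and
// whether the input was a metadata file - the same shape as
// unwrapMetadataFile above.
func unwrapName(filename string) (string, bool) {
	if !strings.HasSuffix(filename, metaFileExt) {
		return "", false
	}
	return filename[:len(filename)-len(metaFileExt)], true
}

func main() {
	fmt.Println(unwrapName("photo.jpg.json")) // "photo.jpg" true
	fmt.Println(unwrapName("photo.jpg"))      // "" false
}
```

This is what lets the ChangeNotify wrapper below map a change on a metadata file back to the user-visible object name, and ignore changes to the data files themselves.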
// makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) {
if mode != Uncompressed {
@@ -979,7 +989,8 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
fs.Logf(f, "path %q entryType %d", path, entryType)
var (
wrappedPath string
wrappedPath string
isMetadataFile bool
)
switch entryType {
case fs.EntryDirectory:
@@ -987,7 +998,10 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
case fs.EntryObject:
// Note: All we really need to do to monitor the object is to check whether the metadata changed,
// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
wrappedPath = makeMetadataName(path)
wrappedPath, isMetadataFile = unwrapMetadataFile(path)
if !isMetadataFile {
return
}
default:
fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
return


@@ -14,23 +14,26 @@ import (
"github.com/rclone/rclone/fstest/fstests"
)
var defaultOpt = fstests.Opt{
RemoteName: "TestCompress:",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{},
}
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
fstests.Run(t, &defaultOpt)
}
// TestRemoteGzip tests GZIP compression
@@ -40,27 +43,13 @@ func TestRemoteGzip(t *testing.T) {
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
QuickTestOK: true,
})
opt := defaultOpt
opt.RemoteName = name + ":"
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
}
opt.QuickTestOK = true
fstests.Run(t, &opt)
}


@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
{Name: name, Key: "no_data_encryption", Value: "true"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})


@@ -71,7 +71,7 @@ const (
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
defaultChunkSize = 8 * fs.Mebi
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
partialFields = "id,name,size,md5Checksum,sha1Checksum,sha256Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
listRGrouping = 50 // number of IDs to search at once when using ListR
listRInputBuffer = 1000 // size of input buffer when using ListR
defaultXDGIcon = "text-html"
@@ -143,6 +143,41 @@ var (
_linkTemplates map[string]*template.Template // available link types
)
// rwChoices type for fs.Bits
type rwChoices struct{}
func (rwChoices) Choices() []fs.BitsChoicesInfo {
return []fs.BitsChoicesInfo{
{Bit: uint64(rwOff), Name: "off"},
{Bit: uint64(rwRead), Name: "read"},
{Bit: uint64(rwWrite), Name: "write"},
}
}
// rwChoice type alias
type rwChoice = fs.Bits[rwChoices]
const (
rwRead rwChoice = 1 << iota
rwWrite
rwOff rwChoice = 0
)
// Examples for the options
var rwExamples = fs.OptionExamples{{
Value: rwOff.String(),
Help: "Do not read or write the value",
}, {
Value: rwRead.String(),
Help: "Read the value only",
}, {
Value: rwWrite.String(),
Help: "Write the value only",
}, {
Value: (rwRead | rwWrite).String(),
Help: "Read and Write the value.",
}}
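
A stripped-down sketch of how such a read/write bit-flag option behaves, using plain Go instead of the fs.Bits machinery (illustrative only):

```
package main

import (
	"fmt"
	"strings"
)

// A stand-in for the fs.Bits based rwChoice above.
type rwChoice uint64

const (
	rwRead rwChoice = 1 << iota
	rwWrite
	rwOff rwChoice = 0
)

// IsSet reports whether the given flag is enabled.
func (b rwChoice) IsSet(flag rwChoice) bool { return b&flag != 0 }

// String renders the flags the way the config examples above describe them.
func (b rwChoice) String() string {
	if b == rwOff {
		return "off"
	}
	var parts []string
	if b.IsSet(rwRead) {
		parts = append(parts, "read")
	}
	if b.IsSet(rwWrite) {
		parts = append(parts, "write")
	}
	return strings.Join(parts, ",")
}

func main() {
	mode := rwRead | rwWrite
	fmt.Println(mode)               // read,write
	fmt.Println(mode.IsSet(rwRead)) // true
	fmt.Println(rwOff)              // off
}
```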
// Parse the scopes option returning a slice of scopes
func driveScopes(scopesString string) (scopes []string) {
if scopesString == "" {
@@ -250,9 +285,13 @@ func init() {
}
return nil, fmt.Errorf("unknown state %q", config.State)
},
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `User metadata is stored in the properties field of the drive object.`,
},
Options: append(driveOAuthOptions(), []fs.Option{{
Name: "scope",
Help: "Scope that rclone should use when requesting access from drive.",
Help: "Comma separated list of scopes that rclone should use when requesting access from drive.",
Examples: []fs.OptionExample{{
Value: "drive",
Help: "Full access all files, excluding Application Data Folder.",
@@ -277,20 +316,23 @@ Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.
`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Sensitive: true,
}, {
Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Sensitive: true,
}, {
Name: "auth_owner_only",
Default: false,
@@ -317,16 +359,35 @@ rather than shortcuts themselves when doing server side copies.`,
Default: false,
Help: "Skip google documents in all listings.\n\nIf given, gdocs practically become invisible to rclone.",
Advanced: true,
}, {
Name: "show_all_gdocs",
Default: false,
Help: `Show all Google Docs including non-exportable ones in listings.
If you try a server side copy on a Google Form without this flag, you
will get this error:
No export formats found for "application/vnd.google-apps.form"
However adding this flag will allow the form to be server side copied.
Note that rclone doesn't add extensions to the Google Docs file names
in this mode.
Do **not** use this flag when trying to download Google Docs - rclone
will fail to download them.
`,
Advanced: true,
}, {
Name: "skip_checksum_gphotos",
Default: false,
Help: `Skip MD5 checksum on Google photos and videos only.
Help: `Skip checksums on Google photos and videos only.
Use this if you get checksum errors when transferring Google photos or
videos.
Setting this flag will cause Google photos and videos to return a
blank MD5 checksum.
blank checksums.
Google photos are identified by being in the "photos" space.
@@ -416,10 +477,11 @@ date is used.`,
Help: "Size of listing chunk 100-1000, 0 to disable.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.`,
Advanced: true,
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.`,
Advanced: true,
Sensitive: true,
}, {
Name: "alternate_export",
Default: false,
@@ -590,9 +652,82 @@ This resource key requirement only applies to a subset of old files.
Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the
resource key is no needed.
resource key is not needed.
`,
Advanced: true,
Sensitive: true,
}, {
Name: "fast_list_bug_fix",
Help: `Work around a bug in Google Drive listing.
Normally rclone will work around a bug in Google Drive when using
--fast-list (ListR) where the search "(A in parents) or (B in
parents)" returns nothing sometimes. See #3114, #4289 and
https://issuetracker.google.com/issues/149522397
Rclone detects this by finding no items in more than one directory
when listing and retries them as lists of individual directories.
This means that if you have a lot of empty directories rclone will end
up listing them all individually and this can take many more API
calls.
This flag allows the work-around to be disabled. This is **not**
recommended in normal use - only if you have a particular case you are
having trouble with like many empty directories.
`,
Advanced: true,
Default: true,
}, {
Name: "metadata_owner",
Help: `Control whether owner should be read or written in metadata.
Owner is a standard part of the file metadata so is easy to read. But it
isn't always desirable to set the owner from the metadata.
Note that you can't set the owner on Shared Drives, and that setting
ownership will generate an email to the new owner (this can't be
disabled), and you can't transfer ownership to someone outside your
organization.
`,
Advanced: true,
Default: rwRead,
Examples: rwExamples,
}, {
Name: "metadata_permissions",
Help: `Control whether permissions should be read or written in metadata.
Reading permissions metadata from files can be done quickly, but it
isn't always desirable to set the permissions from the metadata.
Note that rclone drops any inherited permissions on Shared Drives and
any owner permission on My Drives as these are duplicated in the owner
metadata.
`,
Advanced: true,
Default: rwOff,
Examples: rwExamples,
}, {
Name: "metadata_labels",
Help: `Control whether labels should be read or written in metadata.
Reading labels metadata from files takes an extra API transaction and
will slow down listings. It isn't always desirable to set the labels
from the metadata.
The format of labels is documented in the drive API documentation at
https://developers.google.com/drive/api/reference/rest/v3/Label -
rclone just provides a JSON dump of this format.
When setting labels, the label and fields must already exist - rclone
will not create them. This means that if you are transferring labels
from two different accounts you will have to create the labels in
advance and use the metadata mapper to translate the IDs between the
two accounts.
`,
Advanced: true,
Default: rwOff,
Examples: rwExamples,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
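
The metadata_labels help above says labels travel as a JSON dump inside a single metadata value. A small illustrative sketch of that round trip; the label structure below is a made-up stand-in, not the Drive API type:

```
package main

import (
	"encoding/json"
	"fmt"
)

// label is a tiny stand-in for the Drive API Label type which rclone
// stores as a JSON dump in the "labels" metadata key.
type label struct {
	ID     string            `json:"id"`
	Fields map[string]string `json:"fields,omitempty"`
}

func main() {
	// Metadata values are plain strings, so labels are carried as JSON text.
	labels := []label{{ID: "labelId123", Fields: map[string]string{"fieldId": "value"}}}
	buf, err := json.Marshal(labels)
	if err != nil {
		panic(err)
	}
	metadata := map[string]string{"labels": string(buf)}
	fmt.Println(metadata["labels"])

	// Reading it back is a plain unmarshal of the same string.
	var got []label
	if err := json.Unmarshal([]byte(metadata["labels"]), &got); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", got)
}
```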
@@ -640,6 +775,7 @@ type Options struct {
UseTrash bool `config:"use_trash"`
CopyShortcutContent bool `config:"copy_shortcut_content"`
SkipGdocs bool `config:"skip_gdocs"`
ShowAllGdocs bool `config:"show_all_gdocs"`
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"`
TrashedOnly bool `config:"trashed_only"`
@@ -667,6 +803,10 @@ type Options struct {
SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"`
FastListBugFix bool `config:"fast_list_bug_fix"`
MetadataOwner rwChoice `config:"metadata_owner"`
MetadataPermissions rwChoice `config:"metadata_permissions"`
MetadataLabels rwChoice `config:"metadata_labels"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
}
@@ -688,23 +828,25 @@ type Fs struct {
exportExtensions []string // preferred extensions to download docs
importMimeTypes []string // MIME types to convert to docs
isTeamDrive bool // true if this is a team drive
fileFields googleapi.Field // fields to fetch file info with
m configmap.Mapper
grouping int32 // number of IDs to search at once in ListR - read with atomic
listRmu *sync.Mutex // protects listRempties
listRempties map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
dirResourceKeys *sync.Map // map directory ID to resource key
grouping int32 // number of IDs to search at once in ListR - read with atomic
listRmu *sync.Mutex // protects listRempties
listRempties map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
dirResourceKeys *sync.Map // map directory ID to resource key
permissionsMu *sync.Mutex // protect the below
permissions map[string]*drive.Permission // map permission IDs to Permissions
}
type baseObject struct {
fs *Fs // what this object is part of
remote string // The remote path
id string // Drive Id of this object
modifiedDate string // RFC3339 time it was last modified
mimeType string // The object MIME type
bytes int64 // size of the object
parents []string // IDs of the parent directories
resourceKey *string // resourceKey is needed for link shared objects
fs *Fs // what this object is part of
remote string // The remote path
id string // Drive Id of this object
modifiedDate string // RFC3339 time it was last modified
mimeType string // The object MIME type
bytes int64 // size of the object
parents []string // IDs of the parent directories
resourceKey *string // resourceKey is needed for link shared objects
metadata *fs.Metadata // metadata if known
}
type documentObject struct {
baseObject
@@ -723,6 +865,8 @@ type Object struct {
baseObject
url string // Download URL of this object
md5sum string // md5sum of the object
sha1sum string // sha1sum of the object
sha256sum string // sha256sum of the object
v2Download bool // generate v2 download link ondemand
}
@@ -951,7 +1095,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
list.Header().Add("X-Goog-Drive-Resource-Keys", resourceKeysHeader)
}
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.getFileFields(ctx))
OUTER:
for {
@@ -1225,9 +1369,10 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
listRmu: new(sync.Mutex),
listRempties: make(map[string]struct{}),
dirResourceKeys: new(sync.Map),
permissionsMu: new(sync.Mutex),
permissions: make(map[string]*drive.Permission),
}
f.isTeamDrive = opt.TeamDriveID != ""
f.fileFields = f.getFileFields()
f.features = (&fs.Features{
DuplicateFiles: true,
ReadMimeType: true,
@@ -1235,6 +1380,9 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
FilterAware: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f)
// Create a new authorized Drive client.
@@ -1339,7 +1487,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
return f, nil
}
func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
func (f *Fs) newBaseObject(ctx context.Context, remote string, info *drive.File) (o baseObject, err error) {
modifiedDate := info.ModifiedTime
if f.opt.UseCreatedDate {
modifiedDate = info.CreatedTime
@@ -1350,7 +1498,7 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
if f.opt.SizeAsQuota {
size = info.QuotaBytesUsed
}
return baseObject{
o = baseObject{
fs: f,
remote: remote,
id: info.Id,
@@ -1359,10 +1507,15 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
bytes: size,
parents: info.Parents,
}
err = nil
if fs.GetConfig(ctx).Metadata {
err = o.parseMetadata(ctx, info)
}
return o, err
}
// getFileFields gets the fields for a normal file Get or List
func (f *Fs) getFileFields() (fields googleapi.Field) {
func (f *Fs) getFileFields(ctx context.Context) (fields googleapi.Field) {
fields = partialFields
if f.opt.AuthOwnerOnly {
fields += ",owners"
@@ -1376,40 +1529,53 @@ func (f *Fs) getFileFields() (fields googleapi.Field) {
if f.opt.SizeAsQuota {
fields += ",quotaBytesUsed"
}
if fs.GetConfig(ctx).Metadata {
fields += "," + metadataFields
}
return fields
}
// newRegularObject creates an fs.Object for a normal drive.File
func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
func (f *Fs) newRegularObject(ctx context.Context, remote string, info *drive.File) (obj fs.Object, err error) {
// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
if f.opt.SkipChecksumGphotos {
for _, space := range info.Spaces {
if space == "photos" {
info.Md5Checksum = ""
info.Sha1Checksum = ""
info.Sha256Checksum = ""
break
}
}
}
o := &Object{
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
md5sum: strings.ToLower(info.Md5Checksum),
sha1sum: strings.ToLower(info.Sha1Checksum),
sha256sum: strings.ToLower(info.Sha256Checksum),
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
}
o.baseObject, err = f.newBaseObject(ctx, remote, info)
if err != nil {
return nil, err
}
if info.ResourceKey != "" {
o.resourceKey = &info.ResourceKey
}
return o
return o, nil
}
// newDocumentObject creates an fs.Object for a google docs drive.File
func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
func (f *Fs) newDocumentObject(ctx context.Context, remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
mediaType, _, err := mime.ParseMediaType(exportMimeType)
if err != nil {
return nil, err
}
url := info.ExportLinks[mediaType]
baseObject := f.newBaseObject(remote+extension, info)
baseObject, err := f.newBaseObject(ctx, remote+extension, info)
if err != nil {
return nil, err
}
baseObject.bytes = -1
baseObject.mimeType = exportMimeType
return &documentObject{
@@ -1421,7 +1587,7 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
}
// newLinkObject creates an fs.Object that represents a link a google docs drive.File
func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
func (f *Fs) newLinkObject(ctx context.Context, remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
t := linkTemplate(exportMimeType)
if t == nil {
return nil, fmt.Errorf("unsupported link type %s", exportMimeType)
@@ -1440,7 +1606,10 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
return nil, fmt.Errorf("executing template failed: %w", err)
}
baseObject := f.newBaseObject(remote+extension, info)
baseObject, err := f.newBaseObject(ctx, remote+extension, info)
if err != nil {
return nil, err
}
baseObject.bytes = int64(buf.Len())
baseObject.mimeType = exportMimeType
return &linkObject{
@@ -1456,7 +1625,7 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
// If item has MD5 sum it is a file stored on drive
if info.Md5Checksum != "" {
return f.newRegularObject(remote, info), nil
return f.newRegularObject(ctx, remote, info)
}
extension, exportName, exportMimeType, isDocument := f.findExportFormat(ctx, info)
@@ -1487,13 +1656,15 @@ func (f *Fs) newObjectWithExportInfo(
case info.MimeType == shortcutMimeTypeDangling:
// Pretend a dangling shortcut is a regular object
// It will error if used, but appear in listings so it can be deleted
return f.newRegularObject(remote, info), nil
return f.newRegularObject(ctx, remote, info)
case info.Md5Checksum != "":
// If item has MD5 sum it is a file stored on drive
return f.newRegularObject(remote, info), nil
return f.newRegularObject(ctx, remote, info)
case f.opt.SkipGdocs:
fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
return nil, fs.ErrorObjectNotFound
case f.opt.ShowAllGdocs:
return f.newDocumentObject(ctx, remote, info, "", info.MimeType)
default:
// If item MimeType is in the ExportFormats then it is a google doc
if !isDocument {
@@ -1505,9 +1676,9 @@ func (f *Fs) newObjectWithExportInfo(
return nil, fs.ErrorObjectNotFound
}
if isLinkMimeType(exportMimeType) {
return f.newLinkObject(remote, info, extension, exportMimeType)
return f.newLinkObject(ctx, remote, info, extension, exportMimeType)
}
return f.newDocumentObject(remote, info, extension, exportMimeType)
return f.newDocumentObject(ctx, remote, info, extension, exportMimeType)
}
}
@@ -1886,7 +2057,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
// drive where (A in parents) or (B in parents) returns nothing
// sometimes. See #3114, #4289 and
// https://issuetracker.google.com/issues/149522397
if len(dirs) > 1 && !foundItems {
if f.opt.FastListBugFix && len(dirs) > 1 && !foundItems {
if atomic.SwapInt32(&f.grouping, 1) != 1 {
fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
}
@@ -2135,7 +2306,7 @@ func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *dr
fs.Errorf(nil, "Expecting shortcutDetails in %v", item)
return item, nil
}
newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.getFileFields(ctx))
if err != nil {
var gerr *googleapi.Error
if errors.As(err, &gerr) && gerr.Code == 404 {
@@ -2267,6 +2438,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
} else {
createInfo.MimeType = fs.MimeTypeFromName(remote)
}
updateMetadata, err := f.fetchAndUpdateMetadata(ctx, src, options, createInfo, false)
if err != nil {
return nil, err
}
var info *drive.File
if size >= 0 && size < int64(f.opt.UploadCutoff) {
@@ -2291,6 +2466,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
return nil, err
}
}
err = updateMetadata(ctx, info)
if err != nil {
return nil, err
}
return f.newObjectWithInfo(ctx, remote, info)
}
@@ -2979,7 +3158,7 @@ func (f *Fs) DirCacheFlush() {
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
return hash.NewHashSet(hash.MD5, hash.SHA1, hash.SHA256)
}
func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
@@ -3208,7 +3387,7 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
// copy file with id to dest
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
info, err := f.getFile(ctx, id, f.fileFields)
info, err := f.getFile(ctx, id, f.getFileFields(ctx))
if err != nil {
return fmt.Errorf("couldn't find id: %w", err)
}
@@ -3540,10 +3719,16 @@ func (o *baseObject) Remote() string {
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
if t == hash.MD5 {
return o.md5sum, nil
}
return o.md5sum, nil
if t == hash.SHA1 {
return o.sha1sum, nil
}
if t == hash.SHA256 {
return o.sha256sum, nil
}
return "", hash.ErrUnsupported
}
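
A standalone sketch of the new dispatch-by-hash-type shape (stand-in types, not the rclone hash package). Note that the cached sums can legitimately be empty strings, for example for Google photos when checksums are skipped:

```
package main

import (
	"errors"
	"fmt"
)

// hashType stands in for hash.Type in this sketch.
type hashType int

const (
	hashMD5 hashType = iota
	hashSHA1
	hashSHA256
)

var errUnsupported = errors.New("hash type not supported")

// object caches the checksums returned by the drive API listing, as the
// updated Object struct above now does for md5sum/sha1sum/sha256sum.
type object struct {
	md5sum, sha1sum, sha256sum string
}

// Hash returns the requested cached checksum, or an error for anything else.
func (o *object) Hash(t hashType) (string, error) {
	switch t {
	case hashMD5:
		return o.md5sum, nil
	case hashSHA1:
		return o.sha1sum, nil
	case hashSHA256:
		return o.sha256sum, nil
	}
	return "", errUnsupported
}

func main() {
	o := &object{md5sum: "d41d8cd9...", sha256sum: "e3b0c442..."}
	sum, err := o.Hash(hashSHA256)
	fmt.Println(sum, err)
}
```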
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
@@ -3882,10 +4067,20 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
MimeType: srcMimeType,
ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
}
updateMetadata, err := o.fs.fetchAndUpdateMetadata(ctx, src, options, updateInfo, true)
if err != nil {
return err
}
info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
if err != nil {
return err
}
err = updateMetadata(ctx, info)
if err != nil {
return err
}
newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info)
if err != nil {
return err
@@ -3971,6 +4166,26 @@ func (o *baseObject) ParentID() string {
return ""
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
if o.metadata != nil {
return *o.metadata, nil
}
fs.Debugf(o, "Fetching metadata")
id := actualID(o.id)
info, err := o.fs.getFile(ctx, id, o.fs.getFileFields(ctx))
if err != nil {
return nil, err
}
err = o.parseMetadata(ctx, info)
if err != nil {
return nil, err
}
return *o.metadata, nil
}
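
The Metadata method above lazily fetches and caches metadata on first use. A self-contained sketch of that pattern (hypothetical names, with the drive API call replaced by an injected function):

```
package main

import "fmt"

// metadata mirrors fs.Metadata (a string key/value map) for this sketch.
type metadata map[string]string

// object caches metadata after the first fetch: return the cached copy if
// present, otherwise fetch, parse and cache it - the same lazy pattern as
// baseObject.Metadata above.
type object struct {
	id       string
	metadata *metadata
	fetch    func(id string) (metadata, error) // stands in for the drive API call
}

func (o *object) Metadata() (metadata, error) {
	if o.metadata != nil {
		return *o.metadata, nil
	}
	m, err := o.fetch(o.id)
	if err != nil {
		return nil, err
	}
	o.metadata = &m
	return *o.metadata, nil
}

func main() {
	calls := 0
	o := &object{id: "abc", fetch: func(id string) (metadata, error) {
		calls++
		return metadata{"mtime": "2023-11-26T16:07:39Z"}, nil
	}}
	_, _ = o.Metadata()
	_, _ = o.Metadata()
	fmt.Println("API calls:", calls) // 1 - the second call is served from cache
}
```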
func (o *documentObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}
@@ -4032,6 +4247,7 @@ var (
_ fs.MimeTyper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.ParentIDer = (*Object)(nil)
_ fs.Metadataer = (*Object)(nil)
_ fs.Object = (*documentObject)(nil)
_ fs.MimeTyper = (*documentObject)(nil)
_ fs.IDer = (*documentObject)(nil)

backend/drive/metadata.go (new file, +608 lines)

@@ -0,0 +1,608 @@
package drive
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"sync"
"github.com/rclone/rclone/fs"
"golang.org/x/sync/errgroup"
drive "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
// system metadata keys which this backend owns
var systemMetadataInfo = map[string]fs.MetadataHelp{
"content-type": {
Help: "The MIME type of the file.",
Type: "string",
Example: "text/plain",
},
"mtime": {
Help: "Time of last modification with mS accuracy.",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999Z07:00",
},
"btime": {
Help: "Time of file birth (creation) with mS accuracy. Note that this is only writable on fresh uploads - it can't be written for updates.",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999Z07:00",
},
"copy-requires-writer-permission": {
Help: "Whether the options to copy, print, or download this file, should be disabled for readers and commenters.",
Type: "boolean",
Example: "true",
},
"writers-can-share": {
Help: "Whether users with only writer permission can modify the file's permissions. Not populated for items in shared drives.",
Type: "boolean",
Example: "false",
},
"viewed-by-me": {
Help: "Whether the file has been viewed by this user.",
Type: "boolean",
Example: "true",
ReadOnly: true,
},
"owner": {
Help: "The owner of the file. Usually an email address. Enable with --drive-metadata-owner.",
Type: "string",
Example: "user@example.com",
},
"permissions": {
Help: "Permissions in a JSON dump of Google drive format. On shared drives these will only be present if they aren't inherited. Enable with --drive-metadata-permissions.",
Type: "JSON",
Example: "{}",
},
"folder-color-rgb": {
Help: "The color for a folder or a shortcut to a folder as an RGB hex string.",
Type: "string",
Example: "881133",
},
"description": {
Help: "A short description of the file.",
Type: "string",
Example: "Contract for signing",
},
"starred": {
Help: "Whether the user has starred the file.",
Type: "boolean",
Example: "false",
},
"labels": {
Help: "Labels attached to this file in a JSON dump of Googled drive format. Enable with --drive-metadata-labels.",
Type: "JSON",
Example: "[]",
},
}
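
Purely for illustration, a made-up example of what an object's metadata map could look like with the system keys documented above. The values are invented, and owner/permissions/labels only appear when the matching --drive-metadata-* option enables reading them:

```
package main

import "fmt"

func main() {
	// Illustrative values only - not real output.
	meta := map[string]string{
		"content-type":                    "text/plain",
		"mtime":                           "2023-11-26T16:07:39.123Z",
		"btime":                           "2023-01-02T15:04:05.999Z",
		"copy-requires-writer-permission": "false",
		"writers-can-share":               "true",
		"viewed-by-me":                    "true",
		"starred":                         "false",
		"description":                     "Contract for signing",
		"owner":                           "user@example.com",
		"permissions":                     "[]",
		"labels":                          "[]",
	}
	for k, v := range meta {
		fmt.Printf("%s=%s\n", k, v)
	}
}
```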
// Extra fields we need to fetch to implement the system metadata above
var metadataFields = googleapi.Field(strings.Join([]string{
"copyRequiresWriterPermission",
"description",
"folderColorRgb",
"hasAugmentedPermissions",
"owners",
"permissionIds",
"permissions",
"properties",
"starred",
"viewedByMe",
"viewedByMeTime",
"writersCanShare",
}, ","))
// Fields we need to read from permissions
var permissionsFields = googleapi.Field(strings.Join([]string{
"*",
"permissionDetails/*",
}, ","))
// getPermission returns permissions for the fileID and permissionID passed in
func (f *Fs) getPermission(ctx context.Context, fileID, permissionID string, useCache bool) (perm *drive.Permission, inherited bool, err error) {
f.permissionsMu.Lock()
defer f.permissionsMu.Unlock()
if useCache {
perm = f.permissions[permissionID]
if perm != nil {
return perm, false, nil
}
}
fs.Debugf(f, "Fetching permission %q", permissionID)
err = f.pacer.Call(func() (bool, error) {
perm, err = f.svc.Permissions.Get(fileID, permissionID).
Fields(permissionsFields).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, false, err
}
inherited = len(perm.PermissionDetails) > 0 && perm.PermissionDetails[0].Inherited
cleanPermission(perm)
// cache the permission
f.permissions[permissionID] = perm
return perm, inherited, err
}
// Set the permissions on the info
func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions []*drive.Permission) (err error) {
for _, perm := range permissions {
if perm.Role == "owner" {
// ignore owner permissions - these are set with owner
continue
}
cleanPermissionForWrite(perm)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Permissions.Create(info.Id, perm).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set permission: %w", err)
}
}
return nil
}
// Clean attributes from permissions which we can't write
func cleanPermissionForWrite(perm *drive.Permission) {
perm.Deleted = false
perm.DisplayName = ""
perm.Id = ""
perm.Kind = ""
perm.PermissionDetails = nil
perm.TeamDrivePermissionDetails = nil
}
// Clean and cache the permission if not already cached
func (f *Fs) cleanAndCachePermission(perm *drive.Permission) {
f.permissionsMu.Lock()
defer f.permissionsMu.Unlock()
cleanPermission(perm)
if _, found := f.permissions[perm.Id]; !found {
f.permissions[perm.Id] = perm
}
}
// Clean fields we don't need to keep from the permission
func cleanPermission(perm *drive.Permission) {
// DisplayName: Output only. The "pretty" name of the value of the
// permission. The following is a list of examples for each type of
// permission: * `user` - User's full name, as defined for their Google
// account, such as "Joe Smith." * `group` - Name of the Google Group,
// such as "The Company Administrators." * `domain` - String domain
// name, such as "thecompany.com." * `anyone` - No `displayName` is
// present.
perm.DisplayName = ""
// Kind: Output only. Identifies what kind of resource this is. Value:
// the fixed string "drive#permission".
perm.Kind = ""
// PermissionDetails: Output only. Details of whether the permissions on
// this shared drive item are inherited or directly on this item. This
// is an output-only field which is present only for shared drive items.
perm.PermissionDetails = nil
// PhotoLink: Output only. A link to the user's profile photo, if
// available.
perm.PhotoLink = ""
// TeamDrivePermissionDetails: Output only. Deprecated: Output only. Use
// `permissionDetails` instead.
perm.TeamDrivePermissionDetails = nil
}
// Fields we need to read from labels
var labelsFields = googleapi.Field(strings.Join([]string{
"*",
}, ","))
// getLabels returns labels for the fileID passed in
func (f *Fs) getLabels(ctx context.Context, fileID string) (labels []*drive.Label, err error) {
fs.Debugf(f, "Fetching labels for %q", fileID)
listLabels := f.svc.Files.ListLabels(fileID).
Fields(labelsFields).
Context(ctx)
for {
var info *drive.LabelList
err = f.pacer.Call(func() (bool, error) {
info, err = listLabels.Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
labels = append(labels, info.Labels...)
if info.NextPageToken == "" {
break
}
listLabels.PageToken(info.NextPageToken)
}
for _, label := range labels {
cleanLabel(label)
}
return labels, nil
}
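
getLabels above drains a page-token based listing: call, append, and stop when the token comes back empty. The same loop shape as a standalone sketch (stand-in types, not the Drive API):

```
package main

import "fmt"

// page is a stand-in for a drive API list response with a continuation token.
type page struct {
	items         []string
	nextPageToken string
}

// listAll drains a token-paginated listing, the same loop shape as getLabels above.
func listAll(list func(token string) page) (all []string) {
	token := ""
	for {
		p := list(token)
		all = append(all, p.items...)
		if p.nextPageToken == "" {
			break
		}
		token = p.nextPageToken
	}
	return all
}

func main() {
	pages := map[string]page{
		"":   {items: []string{"label1", "label2"}, nextPageToken: "t1"},
		"t1": {items: []string{"label3"}},
	}
	fmt.Println(listAll(func(token string) page { return pages[token] }))
}
```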
// Set the labels on the info
func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.Label) (err error) {
if len(labels) == 0 {
return nil
}
req := drive.ModifyLabelsRequest{}
for _, label := range labels {
req.LabelModifications = append(req.LabelModifications, &drive.LabelModification{
FieldModifications: labelFieldsToFieldModifications(label.Fields),
LabelId: label.Id,
})
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.ModifyLabels(info.Id, &req).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set owner: %w", err)
}
return nil
}
// Convert label fields into something which can set the fields
func labelFieldsToFieldModifications(fields map[string]drive.LabelField) (out []*drive.LabelFieldModification) {
for id, field := range fields {
var emails []string
for _, user := range field.User {
emails = append(emails, user.EmailAddress)
}
out = append(out, &drive.LabelFieldModification{
// FieldId: The ID of the field to be modified.
FieldId: id,
// SetDateValues: Replaces the value of a dateString Field with these
// new values. The string must be in the RFC 3339 full-date format:
// YYYY-MM-DD.
SetDateValues: field.DateString,
// SetIntegerValues: Replaces the value of an `integer` field with these
// new values.
SetIntegerValues: field.Integer,
// SetSelectionValues: Replaces a `selection` field with these new
// values.
SetSelectionValues: field.Selection,
// SetTextValues: Sets the value of a `text` field.
SetTextValues: field.Text,
// SetUserValues: Replaces a `user` field with these new values. The
// values must be valid email addresses.
SetUserValues: emails,
})
}
return out
}
// Clean fields we don't need to keep from the label
func cleanLabel(label *drive.Label) {
// Kind: This is always drive#label
label.Kind = ""
for name, field := range label.Fields {
// Kind: This is always drive#labelField.
field.Kind = ""
// Note the fields are copies so we need to write them
// back to the map
label.Fields[name] = field
}
}
// Parse the metadata from drive item
//
// It should return nil if there is no Metadata
func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err error) {
metadata := make(fs.Metadata, 16)
// Dump user metadata first as it overrides system metadata
for k, v := range info.Properties {
metadata[k] = v
}
// System metadata
metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
metadata["writers-can-share"] = fmt.Sprint(info.WritersCanShare)
metadata["viewed-by-me"] = fmt.Sprint(info.ViewedByMe)
metadata["content-type"] = info.MimeType
// Owners: Output only. The owner of this file. Only certain legacy
// files may have more than one owner. This field isn't populated for
// items in shared drives.
if o.fs.opt.MetadataOwner.IsSet(rwRead) && len(info.Owners) > 0 {
user := info.Owners[0]
if len(info.Owners) > 1 {
fs.Logf(o, "Ignoring more than 1 owner")
}
if user != nil {
id := user.EmailAddress
if id == "" {
id = user.DisplayName
}
metadata["owner"] = id
}
}
if o.fs.opt.MetadataPermissions.IsSet(rwRead) {
// We only write permissions out if they are not inherited.
//
// On My Drives permissions seem to be attached to every item
// so they will always be written out.
//
// On Shared Drives only non-inherited permissions will be
// written out.
// To read the inherited permissions flag will mean we need to
// read the permissions for each object and the cache will be
// useless. However shared drives don't return permissions
// only permissionIds so will need to fetch them for each
// object. We use HasAugmentedPermissions to see if there are
// special permissions before fetching them to save transactions.
// HasAugmentedPermissions: Output only. Whether there are permissions
// directly on this file. This field is only populated for items in
// shared drives.
if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
// Don't process permissions if there aren't any specifically set
info.Permissions = nil
info.PermissionIds = nil
}
// PermissionIds: Output only. List of permission IDs for users with
// access to this file.
//
// Only process these if we have no Permissions
if len(info.PermissionIds) > 0 && len(info.Permissions) == 0 {
info.Permissions = make([]*drive.Permission, 0, len(info.PermissionIds))
g, gCtx := errgroup.WithContext(ctx)
g.SetLimit(o.fs.ci.Checkers)
var mu sync.Mutex // protect the info.Permissions from concurrent writes
for _, permissionID := range info.PermissionIds {
permissionID := permissionID
g.Go(func() error {
// must fetch the team drive ones individually to check the inherited flag
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
if err != nil {
return fmt.Errorf("failed to read permission: %w", err)
}
// Don't write inherited permissions out
if inherited {
return nil
}
// Don't write owner role out - these are covered by the owner metadata
if perm.Role == "owner" {
return nil
}
mu.Lock()
info.Permissions = append(info.Permissions, perm)
mu.Unlock()
return nil
})
}
err = g.Wait()
if err != nil {
return err
}
} else {
// Clean the fetched permissions
for _, perm := range info.Permissions {
o.fs.cleanAndCachePermission(perm)
}
}
// Permissions: Output only. The full list of permissions for the file.
// This is only available if the requesting user can share the file. Not
// populated for items in shared drives.
if len(info.Permissions) > 0 {
buf, err := json.Marshal(info.Permissions)
if err != nil {
return fmt.Errorf("failed to marshal permissions: %w", err)
}
metadata["permissions"] = string(buf)
}
// Permission propagation
// https://developers.google.com/drive/api/guides/manage-sharing#permission-propagation
// Leads me to believe that in non shared drives, permissions
// are added to each item when you set permissions for a
// folder whereas in shared drives they are inherited and
// placed on the item directly.
}
if info.FolderColorRgb != "" {
metadata["folder-color-rgb"] = info.FolderColorRgb
}
if info.Description != "" {
metadata["description"] = info.Description
}
metadata["starred"] = fmt.Sprint(info.Starred)
metadata["btime"] = info.CreatedTime
metadata["mtime"] = info.ModifiedTime
if o.fs.opt.MetadataLabels.IsSet(rwRead) {
// FIXME would be really nice if we knew if files had labels
// before listing but we need to know all possible label IDs
// to get it in the listing.
labels, err := o.fs.getLabels(ctx, actualID(info.Id))
if err != nil {
return fmt.Errorf("failed to fetch labels: %w", err)
}
buf, err := json.Marshal(labels)
if err != nil {
return fmt.Errorf("failed to marshal labels: %w", err)
}
metadata["labels"] = string(buf)
}
o.metadata = &metadata
return nil
}
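
parseMetadata above fetches non-inherited permissions concurrently using a bounded errgroup, with a mutex guarding the shared slice, and drops results that should be skipped (inherited or owner permissions). A self-contained sketch of that shape with a hypothetical fetch function:

```
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

// fetchAll fetches each ID with bounded concurrency and collects the
// results, the same errgroup + mutex shape as the permission fetch above.
// Results whose fetch reports "skip" are dropped.
func fetchAll(ctx context.Context, ids []string, limit int,
	fetch func(ctx context.Context, id string) (result string, skip bool, err error)) ([]string, error) {

	g, gCtx := errgroup.WithContext(ctx)
	g.SetLimit(limit)
	var (
		mu  sync.Mutex // protects out from concurrent appends
		out []string
	)
	for _, id := range ids {
		id := id // capture loop variable for the goroutine
		g.Go(func() error {
			res, skip, err := fetch(gCtx, id)
			if err != nil {
				return fmt.Errorf("failed to fetch %q: %w", id, err)
			}
			if skip {
				return nil
			}
			mu.Lock()
			out = append(out, res)
			mu.Unlock()
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	ids := []string{"p1", "p2", "p3"}
	got, err := fetchAll(context.Background(), ids, 2,
		func(ctx context.Context, id string) (string, bool, error) {
			return "perm-" + id, id == "p2", nil // skip p2, like an inherited permission
		})
	fmt.Println(got, err)
}
```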
// Set the owner on the info
func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err error) {
perm := drive.Permission{
Role: "owner",
EmailAddress: owner,
// Type: The type of the grantee. Valid values are: * `user` * `group` *
// `domain` * `anyone` When creating a permission, if `type` is `user`
// or `group`, you must provide an `emailAddress` for the user or group.
// When `type` is `domain`, you must provide a `domain`. There isn't
// extra information required for an `anyone` type.
Type: "user",
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Permissions.Create(info.Id, &perm).
SupportsAllDrives(true).
TransferOwnership(true).
// SendNotificationEmail(false). - required apparently!
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set owner: %w", err)
}
return nil
}
// Call back to set metadata that can't be set on the upload/update
//
// The *drive.File passed in holds the current state of the drive.File
// and this should update it with any modifications.
type updateMetadataFn func(context.Context, *drive.File) error
// read the metadata from meta and write it into updateInfo
//
// update should be true if this is being used to create metadata for
// an update/PATCH call as the rules on what can be updated are
// slightly different there.
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
callbackFns := []updateMetadataFn{}
callback = func(ctx context.Context, info *drive.File) error {
for _, fn := range callbackFns {
err := fn(ctx, info)
if err != nil {
return err
}
}
return nil
}
// merge metadata into request and user metadata
for k, v := range meta {
k, v := k, v
// parse a boolean from v and write into out
parseBool := func(out *bool) error {
b, err := strconv.ParseBool(v)
if err != nil {
return fmt.Errorf("can't parse metadata %q = %q: %w", k, v, err)
}
*out = b
return nil
}
switch k {
case "copy-requires-writer-permission":
if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
return nil, err
}
case "writers-can-share":
if err := parseBool(&updateInfo.WritersCanShare); err != nil {
return nil, err
}
case "viewed-by-me":
// Can't write this
case "content-type":
updateInfo.MimeType = v
case "owner":
if !f.opt.MetadataOwner.IsSet(rwWrite) {
continue
}
// Can't set Owner on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
return f.setOwner(ctx, info, v)
})
case "permissions":
if !f.opt.MetadataPermissions.IsSet(rwWrite) {
continue
}
var perms []*drive.Permission
err := json.Unmarshal([]byte(v), &perms)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal permissions: %w", err)
}
// Can't set Permissions on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
return f.setPermissions(ctx, info, perms)
})
case "labels":
if !f.opt.MetadataLabels.IsSet(rwWrite) {
continue
}
var labels []*drive.Label
err := json.Unmarshal([]byte(v), &labels)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal labels: %w", err)
}
// Can't set Labels on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
return f.setLabels(ctx, info, labels)
})
case "folder-color-rgb":
updateInfo.FolderColorRgb = v
case "description":
updateInfo.Description = v
case "starred":
if err := parseBool(&updateInfo.Starred); err != nil {
return nil, err
}
case "btime":
if update {
fs.Debugf(f, "Skipping btime metadata as can't update it on an existing file: %v", v)
} else {
updateInfo.CreatedTime = v
}
case "mtime":
updateInfo.ModifiedTime = v
default:
if updateInfo.Properties == nil {
updateInfo.Properties = make(map[string]string, 1)
}
updateInfo.Properties[k] = v
}
}
return callback, nil
}
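
updateMetadata above returns a callback for the pieces (owner, permissions, labels) that can only be applied once the upload has produced a file, while everything else goes straight onto the request. A minimal sketch of that split-then-defer pattern with invented names:

```
package main

import "fmt"

// finishFn is the shape of updateMetadataFn above: work that can only be
// done once the uploaded file (and its ID) exists.
type finishFn func(fileID string) error

// prepare splits metadata into fields that can go on the upload request
// itself and work that has to run afterwards, returning one callback that
// runs the deferred steps in order.
func prepare(meta map[string]string) (request map[string]string, finish finishFn) {
	request = make(map[string]string)
	var deferred []finishFn
	for k, v := range meta {
		k, v := k, v
		switch k {
		case "owner", "permissions", "labels":
			// These can't be set at upload time, so defer them.
			deferred = append(deferred, func(fileID string) error {
				fmt.Printf("post-upload: set %s=%q on %s\n", k, v, fileID)
				return nil
			})
		default:
			request[k] = v
		}
	}
	finish = func(fileID string) error {
		for _, fn := range deferred {
			if err := fn(fileID); err != nil {
				return err
			}
		}
		return nil
	}
	return request, finish
}

func main() {
	req, finish := prepare(map[string]string{
		"description": "hello",
		"owner":       "user@example.com",
	})
	fmt.Println("upload request fields:", req)
	// ... upload happens here, yielding the new file ID ...
	_ = finish("fileID123")
}
```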
// Fetch metadata and update updateInfo if --metadata is in use
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *drive.File, update bool) (callback updateMetadataFn, err error) {
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
if err != nil {
return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
}
callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
if err != nil {
return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
}
return callback, nil
}


@@ -8,121 +8,19 @@ package dropbox
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)
const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (ssync)
defaultBatchSizeAsync = 100 // default batch size if async
)
// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}
// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}
// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}
// Send this to get the engine to quit
var quitRequest = batcherRequest{}
// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}
// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil
}
// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}
// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
err = f.pacer.Call(func() (bool, error) {
complete, err = f.srv.UploadSessionFinishBatchV2(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -139,23 +37,10 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
return complete, nil
}
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && !signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)
// Called by the batcher to commit a batch
func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []*files.FileMetadata, errors []error) (err error) {
// finalise the batch getting either a result or a job id to poll
complete, err := b.finishBatch(ctx, items)
complete, err := f.finishBatch(ctx, items)
if err != nil {
return err
}
@@ -166,19 +51,13 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}
// Report results to clients
var (
errorTag = ""
errorCount = 0
)
// Format results for return
for i := range results {
item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" {
resp.entry = item.Success
results[i] = item.Success
} else {
errorCount++
errorTag = item.Tag
errorTag := item.Tag
if item.Failure != nil {
errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil {
@@ -191,112 +70,9 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
errors[i] = fmt.Errorf("upload failed: %s", errorTag)
}
}
// Show signalled so no need to report error to clients from now on
signalled = true
// Report an error if any failed in the batch
if errorTag != "" {
return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)
return nil
}
// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()
outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}
}
// commit any remaining items
if len(items) > 0 {
commit()
}
}
// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
if !b.Batching() {
return
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}
// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}
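
The dropbox-specific batcher above is removed in favour of the generic lib/batcher (see the import and Batcher type parameterisation in the following hunks). For reference, a self-contained sketch of the core idle-timer commit loop that both versions share, simplified and with invented names:

```
package main

import (
	"fmt"
	"time"
)

// commitLoop batches incoming items and commits them either when the batch
// reaches size, or when no new item has arrived for timeout.
func commitLoop(in <-chan string, size int, timeout time.Duration, commit func([]string)) {
	var batch []string
	idle := time.NewTimer(timeout)
	idle.Stop()
	defer idle.Stop()
	for {
		select {
		case item, ok := <-in:
			if !ok {
				// channel closed: flush whatever is left and exit
				if len(batch) > 0 {
					commit(batch)
				}
				return
			}
			batch = append(batch, item)
			idle.Stop()
			if len(batch) >= size {
				commit(batch)
				batch = nil
			} else {
				idle.Reset(timeout)
			}
		case <-idle.C:
			if len(batch) > 0 {
				commit(batch)
				batch = nil
			}
		}
	}
}

func main() {
	in := make(chan string)
	done := make(chan struct{})
	go func() {
		commitLoop(in, 3, 100*time.Millisecond, func(b []string) { fmt.Println("commit", b) })
		close(done)
	}()
	for _, s := range []string{"a", "b", "c", "d"} { // a,b,c commit on size, d on idle
		in <- s
	}
	time.Sleep(200 * time.Millisecond)
	close(in)
	<-done
}
```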


@@ -47,6 +47,7 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
@@ -121,6 +122,14 @@ var (
// Errors
errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
// Configure the batcher
defaultBatcherOptions = batcher.Options{
MaxBatchSize: 1000,
DefaultTimeoutSync: 500 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 100,
}
)
// Gets an oauth config with the right scopes
@@ -152,7 +161,7 @@ func init() {
},
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: append(append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size (< %v).
@@ -182,8 +191,9 @@ client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere.
`,
Default: "",
Advanced: true,
Default: "",
Advanced: true,
Sensitive: true,
}, {
Name: "shared_files",
Help: `Instructs rclone to work on individual shared files.
@@ -209,68 +219,6 @@ Note that we don't unmount the shared folder afterwards so the
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: "batch_mode",
Help: `Upload file batching sync|async|off.
This sets the batch mode used by rclone.
For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
This has 3 possible values
- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion
Rclone will close any outstanding batches when it exits which may make
a delay on quit.
`,
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: `Max number of files in upload batch.
This sets the batch size of files to upload. It has to be less than 1000.
By default this is 0 which means rclone will calculate the batch size
depending on the setting of batch_mode.
- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use
Rclone will close any outstanding batches when it exits which may make
a delay on quit.
Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading.
If an upload batch is idle for more than this long then it will be
uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
Name: "pacer_min_sleep",
Default: defaultMinSleep,
@@ -289,23 +237,22 @@ default based on the batch_mode in use.
encoder.EncodeDel |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8,
}}...),
}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
})
}
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote dropbox server
@@ -324,7 +271,7 @@ type Fs struct {
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
batcher *batcher // batch builder
batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
}
// Object describes a dropbox object
@@ -450,7 +397,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
batcherOptions := defaultBatcherOptions
batcherOptions.Mode = f.opt.BatchMode
batcherOptions.Size = f.opt.BatchSize
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
if err != nil {
return nil, err
}
@@ -995,6 +946,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
if root == "/" {
return errors.New("can't remove root directory")
}
encRoot := f.opt.Enc.FromStandardPath(root)
if check {
// check directory exists
@@ -1003,10 +955,9 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return fmt.Errorf("Rmdir: %w", err)
}
root = f.opt.Enc.FromStandardPath(root)
// check directory empty
arg := files.ListFolderArg{
Path: root,
Path: encRoot,
Recursive: false,
}
if root == "/" {
@@ -1027,7 +978,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
// remove it
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: encRoot})
return shouldRetry(ctx, err)
})
return err
@@ -1721,7 +1672,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
// If we are batching then we should have written all the data now
// store the commit info now for a batch commit
if o.fs.batcher.Batching() {
return o.fs.batcher.Commit(ctx, args)
return o.fs.batcher.Commit(ctx, o.remote, args)
}
err = o.fs.pacer.Call(func() (bool, error) {

View File

@@ -28,14 +28,14 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded
}
var errorRegex = regexp.MustCompile(`#\d{1,3}`)
var errorRegex = regexp.MustCompile(`#(\d{1,3})`)
func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 {
return 0
}
code, err := strconv.Atoi(matches[0])
code, err := strconv.Atoi(matches[1])
if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0
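
The regex change above adds a capture group so that only the digits are parsed: matches[0] still contains the leading "#", which strconv.Atoi rejects, while matches[1] does not. A standalone snippet (not rclone code) demonstrating the difference with a hypothetical error message:

```
package main

import (
    "fmt"
    "regexp"
    "strconv"
)

func main() {
    errorRegex := regexp.MustCompile(`#(\d{1,3})`)
    msg := "1Fichier error #375: something went wrong" // hypothetical error text

    matches := errorRegex.FindStringSubmatch(msg)
    fmt.Println(matches[0]) // "#375" - strconv.Atoi would reject the leading '#'
    code, err := strconv.Atoi(matches[1])
    fmt.Println(code, err) // 375 <nil>
}
```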
@@ -408,6 +408,32 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
return response, nil
}
func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
request := &MoveDirRequest{
FolderID: folderID,
DestinationFolderID: destinationFolderID,
Rename: newLeaf,
// DestinationUser: destinationUser,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mv.cgi",
}
response = &MoveDirResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't move dir: %w", err)
}
return response, nil
}
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{
URLs: []string{url},

View File

@@ -38,8 +38,9 @@ func init() {
Description: "1Fichier",
NewFs: NewFs,
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key",
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key",
Sensitive: true,
}, {
Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder",
@@ -487,6 +488,51 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove.
//
// If destination exists then return fs.ErrorDirExists.
//
// This is complicated by the fact that we can't use moveDir to move
// to a different directory AND rename at the same time as it can
// overwrite files in the source directory.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
if err != nil {
return err
}
srcIDnumeric, err := strconv.Atoi(srcID)
if err != nil {
return err
}
dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
if err != nil {
return err
}
var resp *MoveDirResponse
resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
if err != nil {
return fmt.Errorf("couldn't rename leaf: %w", err)
}
if resp.Status != "OK" {
return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
// Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
@@ -560,6 +606,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)

View File

@@ -70,6 +70,22 @@ type MoveFileResponse struct {
URLs []string `json:"urls"`
}
// MoveDirRequest is the request structure of the corresponding request
type MoveDirRequest struct {
FolderID int `json:"folder_id"`
DestinationFolderID int `json:"destination_folder_id,omitempty"`
DestinationUser string `json:"destination_user"`
Rename string `json:"rename,omitempty"`
}
// MoveDirResponse is the response structure of the corresponding request
type MoveDirResponse struct {
Status string `json:"status"`
Message string `json:"message"`
OldName string `json:"old_name"`
NewName string `json:"new_name"`
}
// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
URLs []string `json:"urls"`

View File

@@ -84,6 +84,7 @@ Leave blank normally.
Fill in to make rclone start with directory of a given ID.
`,
Sensitive: true,
}, {
Name: "permanent_token",
Help: `Permanent Authentication Token.
@@ -97,6 +98,7 @@ These tokens are normally valid for several years.
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`,
Sensitive: true,
}, {
Name: "token",
Help: `Session Token.
@@ -106,7 +108,8 @@ usually valid for 1 hour.
Don't set this value - rclone will set it automatically.
`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "token_expiry",
Help: `Token expiry time.
@@ -155,9 +158,9 @@ type Fs struct {
tokenMu sync.Mutex // hold when reading the token
token string // current access token
tokenExpiry time.Time // time the current token expires
tokenExpired int32 // read and written with atomic
canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported
tokenExpired atomic.Int32
canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported
}
// Object describes a filefabric object
@@ -240,7 +243,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
err = status // return the error from the RPC
code := status.GetCode()
if code == "login_token_expired" {
atomic.AddInt32(&f.tokenExpired, 1)
f.tokenExpired.Add(1)
} else {
for _, retryCode := range retryStatusCodes {
if code == retryCode.code {
@@ -320,12 +323,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
var refreshed = false
defer func() {
if refreshed {
atomic.StoreInt32(&f.tokenExpired, 0)
f.tokenExpired.Store(0)
}
f.tokenMu.Unlock()
}()
expired := atomic.LoadInt32(&f.tokenExpired) != 0
expired := f.tokenExpired.Load() != 0
if expired {
fs.Debugf(f, "Token invalid - refreshing")
}

View File

@@ -15,7 +15,7 @@ import (
"sync"
"time"
"github.com/rclone/ftp"
"github.com/jlaffaye/ftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -28,6 +28,7 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/proxy"
"github.com/rclone/rclone/lib/readers"
)
@@ -48,13 +49,15 @@ func init() {
Description: "FTP",
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
Sensitive: true,
}, {
Name: "user",
Help: "FTP username.",
Default: currentUser,
Name: "user",
Help: "FTP username.",
Default: currentUser,
Sensitive: true,
}, {
Name: "port",
Help: "FTP port number.",
@@ -172,6 +175,18 @@ Enabled by default. Use 0 to disable.`,
If this is set and no password is supplied then rclone will ask for a password
`,
Advanced: true,
}, {
Name: "socks_proxy",
Default: "",
Help: `Socks 5 proxy host.
Supports the format user:pass@host:port, user@host:port, host:port.
Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -216,6 +231,7 @@ type Options struct {
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
}
// Fs represents a remote FTP server
@@ -233,7 +249,6 @@ type Fs struct {
pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime
@@ -346,10 +361,36 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
return fserrors.ShouldRetry(err), err
}
// Get a TLS config with a unique session cache.
//
// We can't share session caches between connections.
//
// See: https://github.com/rclone/rclone/issues/7234
func (f *Fs) tlsConfig() *tls.Config {
var tlsConfig *tls.Config
if f.opt.TLS || f.opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
if f.opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
}
if f.opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
return tlsConfig
}
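
As the comment notes, TLS session caches must not be shared between connections, so each connection now builds its own tls.Config. A standalone sketch of that design choice (illustrative only; the host name and cache size are placeholders):

```
package main

import "crypto/tls"

func newTLSConfig(host string, cacheSize int) *tls.Config {
    cfg := &tls.Config{ServerName: host}
    if cacheSize > 0 {
        // Each config gets its own LRU cache, so TLS sessions are never
        // shared between connections.
        cfg.ClientSessionCache = tls.NewLRUClientSessionCache(cacheSize)
    }
    return cfg
}

func main() {
    a := newTLSConfig("ftp.example.com", 32)
    b := newTLSConfig("ftp.example.com", 32)
    _, _ = a, b // a and b hold distinct session caches
}
```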
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")
// tls.Config for this connection only. Will be used for data
// and control connections.
tlsConfig := f.tlsConfig()
// Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) {
@@ -357,12 +398,17 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
baseDialer := fshttp.NewDialer(ctx)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
} else {
conn, err = baseDialer.Dial(network, address)
}
if err != nil {
return nil, err
}
// Connect using cleartext only for non TLS
if f.tlsConf == nil {
if tlsConfig == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
@@ -371,7 +417,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, f.tlsConf)
tlsConn := tls.Client(conn, tlsConfig)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
@@ -393,9 +439,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
// as a trigger for sending PBSZ and PROT options to the server.
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -550,19 +596,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
if opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
}
if opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
f := &Fs{
@@ -575,7 +608,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
pass: pass,
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{

View File

@@ -91,18 +91,21 @@ func init() {
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
Sensitive: true,
}, {
Name: "user_project",
Help: "User project.\n\nOptional - needed only for requester pays.",
Name: "user_project",
Help: "User project.\n\nOptional - needed only for requester pays.",
Sensitive: true,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
Sensitive: true,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -1307,10 +1310,11 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.url
if o.fs.opt.UserProject != "" {
o.url = o.url + "&userProject=" + o.fs.opt.UserProject
url += "&userProject=" + o.fs.opt.UserProject
}
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
if err != nil {
return nil, err
}

View File

@@ -29,6 +29,7 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
@@ -71,6 +72,14 @@ var (
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
// Configure the batcher
defaultBatcherOptions = batcher.Options{
MaxBatchSize: 50,
DefaultTimeoutSync: 1000 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 50,
}
)
// Register with Fs
@@ -111,7 +120,7 @@ will count towards storage in your Google Account.`)
}
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: append(append(oauthutil.SharedOptions, []fs.Option{{
Name: "read_only",
Default: false,
Help: `Set to make the Google Photos backend read only.
@@ -158,7 +167,7 @@ listings and won't be transferred.`,
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}}...),
}}...), defaultBatcherOptions.FsOptions("")...),
})
}
@@ -169,6 +178,9 @@ type Options struct {
StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"`
Enc encoder.MultiEncoder `config:"encoding"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
}
// Fs represents a remote storage server
@@ -187,6 +199,7 @@ type Fs struct {
uploadedMu sync.Mutex // to protect the below
uploaded dirtree.DirTree // record of uploaded items
createMu sync.Mutex // held when creating albums to prevent dupes
batcher *batcher.Batcher[uploadedItem, *api.MediaItem]
}
// Object describes a storage object
@@ -312,6 +325,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
albums: map[bool]*albums{},
uploaded: dirtree.New(),
}
batcherOptions := defaultBatcherOptions
batcherOptions.Mode = f.opt.BatchMode
batcherOptions.Size = f.opt.BatchSize
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
if err != nil {
return nil, err
}
f.features = (&fs.Features{
ReadMimeType: true,
}).Fill(ctx, f)
@@ -781,6 +802,13 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
f.batcher.Shutdown()
return nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -961,6 +989,82 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, err
}
// input to the batcher
type uploadedItem struct {
AlbumID string // desired album
UploadToken string // upload ID
}
// Commit a batch of items to albumID returning the errors in errors
func (f *Fs) commitBatchAlbumID(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error, albumID string) {
// Create the media item from an UploadToken, optionally adding to an album
opts := rest.Opts{
Method: "POST",
Path: "/mediaItems:batchCreate",
}
var request = api.BatchCreateRequest{
AlbumID: albumID,
}
itemsInBatch := 0
for i := range items {
if items[i].AlbumID == albumID {
request.NewMediaItems = append(request.NewMediaItems, api.NewMediaItem{
SimpleMediaItem: api.SimpleMediaItem{
UploadToken: items[i].UploadToken,
},
})
itemsInBatch++
}
}
var result api.BatchCreateResponse
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
err = fmt.Errorf("failed to create media item: %w", err)
}
if err == nil && len(result.NewMediaItemResults) != itemsInBatch {
err = fmt.Errorf("bad response to BatchCreate expecting %d items but got %d", itemsInBatch, len(result.NewMediaItemResults))
}
j := 0
for i := range items {
if items[i].AlbumID == albumID {
if err == nil {
media := &result.NewMediaItemResults[j]
if media.Status.Code != 0 {
errors[i] = fmt.Errorf("upload failed: %s (%d)", media.Status.Message, media.Status.Code)
} else {
results[i] = &media.MediaItem
}
} else {
errors[i] = err
}
j++
}
}
}
// Called by the batcher to commit a batch
func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error) (err error) {
// Discover all the AlbumIDs as we have to upload these separately
//
// Should maybe have one batcher per AlbumID
albumIDs := map[string]struct{}{}
for i := range items {
albumIDs[items[i].AlbumID] = struct{}{}
}
// batch the albums
for albumID := range albumIDs {
// errors returned in errors
f.commitBatchAlbumID(ctx, items, results, errors, albumID)
}
return nil
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
@@ -1021,37 +1125,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.New("empty upload token")
}
// Create the media item from an UploadToken, optionally adding to an album
opts = rest.Opts{
Method: "POST",
Path: "/mediaItems:batchCreate",
uploaded := uploadedItem{
AlbumID: albumID,
UploadToken: uploadToken,
}
var request = api.BatchCreateRequest{
AlbumID: albumID,
NewMediaItems: []api.NewMediaItem{
{
SimpleMediaItem: api.SimpleMediaItem{
UploadToken: uploadToken,
},
},
},
// Save the upload into an album
var info *api.MediaItem
if o.fs.batcher.Batching() {
info, err = o.fs.batcher.Commit(ctx, o.remote, uploaded)
} else {
errors := make([]error, 1)
results := make([]*api.MediaItem, 1)
err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
if err == nil {
err = errors[0]
info = results[0]
}
}
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("failed to create media item: %w", err)
}
if len(result.NewMediaItemResults) != 1 {
return errors.New("bad response to BatchCreate wrong number of items")
}
mediaItemResult := result.NewMediaItemResults[0]
if mediaItemResult.Status.Code != 0 {
return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
}
o.setMetaData(&mediaItemResult.MediaItem)
o.setMetaData(info)
// Add upload to internal storage
if pattern.isUpload {

View File

@@ -23,6 +23,7 @@ func TestIntegration(t *testing.T) {
NilObject: (*hasher.Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
},
UnimplementableObjectMethods: []string{},
}

View File

@@ -21,6 +21,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
)
// Fs represents a HDFS server
@@ -31,8 +32,15 @@ type Fs struct {
opt Options // options for this backend
ci *fs.ConfigInfo // global config
client *hdfs.Client
pacer *fs.Pacer // pacer for API calls
}
const (
minSleep = 20 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) {
configPath := os.Getenv("KRB5_CONFIG")
@@ -85,7 +93,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
options := hdfs.ClientOptions{
Addresses: []string{opt.Namenode},
Addresses: opt.Namenode,
UseDatanodeHostname: false,
}
@@ -114,6 +122,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt,
ci: fs.GetConfig(ctx),
client: client,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{

View File

@@ -19,9 +19,11 @@ func init() {
Description: "Hadoop distributed file system",
NewFs: NewFs,
Options: []fs.Option{{
Name: "namenode",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
Name: "namenode",
Help: "Hadoop name nodes and ports.\n\nE.g. \"namenode-1:8020,namenode-2:8020,...\" to connect to host namenodes at port 8020.",
Required: true,
Sensitive: true,
Default: fs.CommaSepList{},
}, {
Name: "username",
Help: "Hadoop user name.",
@@ -29,6 +31,7 @@ func init() {
Value: "root",
Help: "Connect to hdfs as root.",
}},
Sensitive: true,
}, {
Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode.
@@ -36,7 +39,8 @@ func init() {
Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy.
@@ -62,7 +66,7 @@ and 'privacy'. Used only with KERBEROS enabled.`,
// Options for this backend
type Options struct {
Namenode string `config:"namenode"`
Namenode fs.CommaSepList `config:"namenode"`
Username string `config:"username"`
ServicePrincipalName string `config:"service_principal_name"`
DataTransferProtection string `config:"data_transfer_protection"`

View File

@@ -5,10 +5,12 @@ package hdfs
import (
"context"
"errors"
"io"
"path"
"time"
"github.com/colinmarc/hdfs/v2"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
@@ -106,7 +108,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
realpath := o.fs.realpath(src.Remote())
realpath := o.fs.realpath(o.remote)
dirname := path.Dir(realpath)
fs.Debugf(o.fs, "update [%s]", realpath)
@@ -141,7 +143,23 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
err = out.Close()
// If the datanodes have acknowledged all writes but not yet
// to the namenode, FileWriter.Close can return ErrReplicating
// (wrapped in an os.PathError). This indicates that all data
// has been written, but the lease is still open for the file.
//
// It is safe in this case to either ignore the error (and let
// the lease expire on its own) or to call Close multiple
// times until it completes without an error. The Java client,
// for context, always chooses to retry, with exponential
// backoff.
err = o.fs.pacer.Call(func() (bool, error) {
err := out.Close()
if err == nil {
return false, nil
}
return errors.Is(err, hdfs.ErrReplicating), err
})
if err != nil {
cleanup()
return err

View File

@@ -36,6 +36,7 @@ func init() {
Name: "http",
Description: "HTTP",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "url",
Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
@@ -210,6 +211,42 @@ func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Op
return createFileResult()
}
// Make the http connection with opt
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
if len(opt.Headers)%2 != 0 {
return false, errors.New("odd number of headers supplied")
}
if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
}
// Parse the endpoint and stick the root onto it
base, err := url.Parse(opt.Endpoint)
if err != nil {
return false, err
}
u, err := rest.URLJoin(base, rest.URLPathEscape(f.root))
if err != nil {
return false, err
}
client := fshttp.NewClient(ctx)
endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
fs.Debugf(nil, "Root: %s", endpoint)
u, err = url.Parse(endpoint)
if err != nil {
return false, err
}
// Update f with the new parameters
f.httpClient = client
f.endpoint = u
f.endpointURL = u.String()
return isFile, nil
}
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -220,47 +257,23 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err
}
if len(opt.Headers)%2 != 0 {
return nil, errors.New("odd number of headers supplied")
}
if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
}
// Parse the endpoint and stick the root onto it
base, err := url.Parse(opt.Endpoint)
if err != nil {
return nil, err
}
u, err := rest.URLJoin(base, rest.URLPathEscape(root))
if err != nil {
return nil, err
}
client := fshttp.NewClient(ctx)
endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
fs.Debugf(nil, "Root: %s", endpoint)
u, err = url.Parse(endpoint)
if err != nil {
return nil, err
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
httpClient: client,
endpoint: u,
endpointURL: u.String(),
name: name,
root: root,
opt: *opt,
ci: ci,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
// Make the http connection
isFile, err := f.httpConnection(ctx, opt)
if err != nil {
return nil, err
}
if isFile {
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
@@ -685,10 +698,66 @@ func (o *Object) MimeType(ctx context.Context) string {
return o.contentType
}
var commandHelp = []fs.CommandHelp{{
Name: "set",
Short: "Set command for updating the config parameters.",
Long: `This set command can be used to update the config parameters
for a running http backend.
Usage Examples:
rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=remote: -o url=https://example.com
The option keys are named as they are in the config file.
This rebuilds the connection to the http backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.
It doesn't return anything.
`,
}}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "set":
newOpt := f.opt
err := configstruct.Set(configmap.Simple(opt), &newOpt)
if err != nil {
return nil, fmt.Errorf("reading config: %w", err)
}
_, err = f.httpConnection(ctx, &newOpt)
if err != nil {
return nil, fmt.Errorf("updating session: %w", err)
}
f.opt = newOpt
keys := []string{}
for k := range opt {
keys = append(keys, k)
}
fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.Commander = &Fs{}
)

View File

@@ -0,0 +1,66 @@
// Package client provides a client for interacting with the ImageKit API.
package client
import (
"context"
"fmt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/rest"
)
// ImageKit main struct
type ImageKit struct {
Prefix string
UploadPrefix string
Timeout int64
UploadTimeout int64
PrivateKey string
PublicKey string
URLEndpoint string
HTTPClient *rest.Client
}
// NewParams is a struct to define parameters to imagekit
type NewParams struct {
PrivateKey string
PublicKey string
URLEndpoint string
}
// New returns ImageKit object from environment variables
func New(ctx context.Context, params NewParams) (*ImageKit, error) {
privateKey := params.PrivateKey
publicKey := params.PublicKey
endpointURL := params.URLEndpoint
switch {
case privateKey == "":
return nil, fmt.Errorf("ImageKit.io URL endpoint is required")
case publicKey == "":
return nil, fmt.Errorf("ImageKit.io public key is required")
case endpointURL == "":
return nil, fmt.Errorf("ImageKit.io private key is required")
}
cliCtx, cliCfg := fs.AddConfig(ctx)
cliCfg.UserAgent = "rclone/imagekit"
client := rest.NewClient(fshttp.NewClient(cliCtx))
client.SetUserPass(privateKey, "")
client.SetHeader("Accept", "application/json")
return &ImageKit{
Prefix: "https://api.imagekit.io/v2",
UploadPrefix: "https://upload.imagekit.io/api/v2",
Timeout: 60,
UploadTimeout: 3600,
PrivateKey: params.PrivateKey,
PublicKey: params.PublicKey,
URLEndpoint: params.URLEndpoint,
HTTPClient: client,
}, nil
}

View File

@@ -0,0 +1,252 @@
package client
import (
"context"
"errors"
"fmt"
"net/http"
"net/url"
"time"
"github.com/rclone/rclone/lib/rest"
"gopkg.in/validator.v2"
)
// FilesOrFolderParam struct is a parameter type to ListFiles() function to search / list media library files.
type FilesOrFolderParam struct {
Path string `json:"path,omitempty"`
Limit int `json:"limit,omitempty"`
Skip int `json:"skip,omitempty"`
SearchQuery string `json:"searchQuery,omitempty"`
}
// AITag represents an AI tag for a media library file.
type AITag struct {
Name string `json:"name"`
Confidence float32 `json:"confidence"`
Source string `json:"source"`
}
// File represents media library File details.
type File struct {
FileID string `json:"fileId"`
Name string `json:"name"`
FilePath string `json:"filePath"`
Type string `json:"type"`
VersionInfo map[string]string `json:"versionInfo"`
IsPrivateFile *bool `json:"isPrivateFile"`
CustomCoordinates *string `json:"customCoordinates"`
URL string `json:"url"`
Thumbnail string `json:"thumbnail"`
FileType string `json:"fileType"`
Mime string `json:"mime"`
Height int `json:"height"`
Width int `json:"Width"`
Size uint64 `json:"size"`
HasAlpha bool `json:"hasAlpha"`
CustomMetadata map[string]any `json:"customMetadata,omitempty"`
EmbeddedMetadata map[string]any `json:"embeddedMetadata"`
CreatedAt time.Time `json:"createdAt"`
UpdatedAt time.Time `json:"updatedAt"`
Tags []string `json:"tags"`
AITags []AITag `json:"AITags"`
}
// Folder represents media library Folder details.
type Folder struct {
*File
FolderPath string `json:"folderPath"`
}
// CreateFolderParam represents parameter to create folder api
type CreateFolderParam struct {
FolderName string `validate:"nonzero" json:"folderName"`
ParentFolderPath string `validate:"nonzero" json:"parentFolderPath"`
}
// DeleteFolderParam represents parameter to delete folder api
type DeleteFolderParam struct {
FolderPath string `validate:"nonzero" json:"folderPath"`
}
// MoveFolderParam represents parameter to move folder api
type MoveFolderParam struct {
SourceFolderPath string `validate:"nonzero" json:"sourceFolderPath"`
DestinationPath string `validate:"nonzero" json:"destinationPath"`
}
// JobIDResponse represents the response struct with JobID for folder operations
type JobIDResponse struct {
JobID string `json:"jobId"`
}
// JobStatus represents the response data of the job status API
type JobStatus struct {
JobID string `json:"jobId"`
Type string `json:"type"`
Status string `json:"status"`
}
// File represents media library File details.
func (ik *ImageKit) File(ctx context.Context, fileID string) (*http.Response, *File, error) {
data := &File{}
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
Method: "GET",
Path: fmt.Sprintf("/files/%s/details", fileID),
RootURL: ik.Prefix,
IgnoreStatus: true,
}, nil, data)
return response, data, err
}
// Files retrieves media library files. Filter options can be supplied as FilesOrFolderParam.
func (ik *ImageKit) Files(ctx context.Context, params FilesOrFolderParam, includeVersion bool) (*http.Response, *[]File, error) {
var SearchQuery = `type = "file"`
if includeVersion {
SearchQuery = `type IN ["file", "file-version"]`
}
if params.SearchQuery != "" {
SearchQuery = params.SearchQuery
}
parameters := url.Values{}
parameters.Set("skip", fmt.Sprintf("%d", params.Skip))
parameters.Set("limit", fmt.Sprintf("%d", params.Limit))
parameters.Set("path", params.Path)
parameters.Set("searchQuery", SearchQuery)
data := &[]File{}
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
Method: "GET",
Path: "/files",
RootURL: ik.Prefix,
Parameters: parameters,
}, nil, data)
return response, data, err
}
// DeleteFile removes file by FileID from media library
func (ik *ImageKit) DeleteFile(ctx context.Context, fileID string) (*http.Response, error) {
var err error
if fileID == "" {
return nil, errors.New("fileID can not be empty")
}
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
Method: "DELETE",
Path: fmt.Sprintf("/files/%s", fileID),
RootURL: ik.Prefix,
NoResponse: true,
}, nil, nil)
return response, err
}
// Folders retrieves media library folders. Filter options can be supplied as FilesOrFolderParam.
func (ik *ImageKit) Folders(ctx context.Context, params FilesOrFolderParam) (*http.Response, *[]Folder, error) {
var SearchQuery = `type = "folder"`
if params.SearchQuery != "" {
SearchQuery = params.SearchQuery
}
parameters := url.Values{}
parameters.Set("skip", fmt.Sprintf("%d", params.Skip))
parameters.Set("limit", fmt.Sprintf("%d", params.Limit))
parameters.Set("path", params.Path)
parameters.Set("searchQuery", SearchQuery)
data := &[]Folder{}
resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
Method: "GET",
Path: "/files",
RootURL: ik.Prefix,
Parameters: parameters,
}, nil, data)
return resp, data, err
}
// CreateFolder creates a new folder in media library
func (ik *ImageKit) CreateFolder(ctx context.Context, param CreateFolderParam) (*http.Response, error) {
var err error
if err = validator.Validate(&param); err != nil {
return nil, err
}
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
Method: "POST",
Path: "/folder",
RootURL: ik.Prefix,
NoResponse: true,
}, param, nil)
return response, err
}
// DeleteFolder removes the folder from media library
func (ik *ImageKit) DeleteFolder(ctx context.Context, param DeleteFolderParam) (*http.Response, error) {
var err error
if err = validator.Validate(&param); err != nil {
return nil, err
}
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
Method: "DELETE",
Path: "/folder",
RootURL: ik.Prefix,
NoResponse: true,
}, param, nil)
return response, err
}
// MoveFolder moves given folder to new path in media library
func (ik *ImageKit) MoveFolder(ctx context.Context, param MoveFolderParam) (*http.Response, *JobIDResponse, error) {
var err error
var response = &JobIDResponse{}
if err = validator.Validate(&param); err != nil {
return nil, nil, err
}
resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
Method: "PUT",
Path: "bulkJobs/moveFolder",
RootURL: ik.Prefix,
}, param, response)
return resp, response, err
}
// BulkJobStatus retrieves the status of a bulk job by job ID.
func (ik *ImageKit) BulkJobStatus(ctx context.Context, jobID string) (*http.Response, *JobStatus, error) {
var err error
var response = &JobStatus{}
if jobID == "" {
return nil, nil, errors.New("jobId can not be blank")
}
resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
Method: "GET",
Path: "bulkJobs/" + jobID,
RootURL: ik.Prefix,
}, nil, response)
return resp, response, err
}

View File

@@ -0,0 +1,96 @@
package client
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"github.com/rclone/rclone/lib/rest"
)
// UploadParam defines upload parameters
type UploadParam struct {
FileName string `json:"fileName"`
Folder string `json:"folder,omitempty"` // default value: /
Tags string `json:"tags,omitempty"`
IsPrivateFile *bool `json:"isPrivateFile,omitempty"` // default: false
}
// UploadResult defines the response structure for the upload API
type UploadResult struct {
FileID string `json:"fileId"`
Name string `json:"name"`
URL string `json:"url"`
ThumbnailURL string `json:"thumbnailUrl"`
Height int `json:"height"`
Width int `json:"Width"`
Size uint64 `json:"size"`
FilePath string `json:"filePath"`
AITags []map[string]any `json:"AITags"`
VersionInfo map[string]string `json:"versionInfo"`
}
// Upload uploads an asset to a imagekit account.
//
// The asset can be:
// - the actual data (io.Reader)
// - the Data URI (Base64 encoded), max ~60 MB (62,910,000 chars)
// - the remote FTP, HTTP or HTTPS URL address of an existing file
//
// https://docs.imagekit.io/api-reference/upload-file-api/server-side-file-upload
func (ik *ImageKit) Upload(ctx context.Context, file io.Reader, param UploadParam) (*http.Response, *UploadResult, error) {
var err error
if param.FileName == "" {
return nil, nil, errors.New("Upload: Filename is required")
}
// Initialize URL values
formParams := url.Values{}
formParams.Add("useUniqueFileName", fmt.Sprint(false))
// Add individual fields to URL values
if param.FileName != "" {
formParams.Add("fileName", param.FileName)
}
if param.Tags != "" {
formParams.Add("tags", param.Tags)
}
if param.Folder != "" {
formParams.Add("folder", param.Folder)
}
if param.IsPrivateFile != nil {
formParams.Add("isPrivateFile", fmt.Sprintf("%v", *param.IsPrivateFile))
}
response := &UploadResult{}
formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName)
if err != nil {
return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err)
}
opts := rest.Opts{
Method: "POST",
Path: "/files/upload",
RootURL: ik.UploadPrefix,
Body: formReader,
ContentType: contentType,
}
resp, err := ik.HTTPClient.CallJSON(ctx, &opts, nil, response)
return resp, response, err
}
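
A minimal, hypothetical usage sketch of the Upload helper above; the endpoint, keys, file name and folder are placeholders, not values taken from this change:

```
package main

import (
    "context"
    "log"
    "os"

    "github.com/rclone/rclone/backend/imagekit/client"
)

func main() {
    ctx := context.Background()
    ik, err := client.New(ctx, client.NewParams{
        URLEndpoint: "https://ik.imagekit.io/your_id", // hypothetical endpoint
        PublicKey:   "public_xxx",                     // hypothetical key
        PrivateKey:  "private_xxx",                    // hypothetical key
    })
    if err != nil {
        log.Fatal(err)
    }
    f, err := os.Open("photo.jpg")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    // Upload into a folder; the returned UploadResult carries the new FileID and URL.
    _, result, err := ik.Upload(ctx, f, client.UploadParam{
        FileName: "photo.jpg",
        Folder:   "/rclone-upload-test",
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("uploaded %s -> %s", result.Name, result.URL)
}
```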

View File

@@ -0,0 +1,72 @@
package client
import (
"crypto/hmac"
"crypto/sha1"
"encoding/hex"
"fmt"
neturl "net/url"
"strconv"
"strings"
"time"
)
// URLParam represents parameters for generating url
type URLParam struct {
Path string
Src string
URLEndpoint string
Signed bool
ExpireSeconds int64
QueryParameters map[string]string
}
// URL generates url from URLParam
func (ik *ImageKit) URL(params URLParam) (string, error) {
var resultURL string
var url *neturl.URL
var err error
var endpoint = params.URLEndpoint
if endpoint == "" {
endpoint = ik.URLEndpoint
}
endpoint = strings.TrimRight(endpoint, "/") + "/"
if params.QueryParameters == nil {
params.QueryParameters = make(map[string]string)
}
if url, err = neturl.Parse(params.Src); err != nil {
return "", err
}
query := url.Query()
for k, v := range params.QueryParameters {
query.Set(k, v)
}
url.RawQuery = query.Encode()
resultURL = url.String()
if params.Signed {
now := time.Now().Unix()
var expires = strconv.FormatInt(now+params.ExpireSeconds, 10)
var path = strings.Replace(resultURL, endpoint, "", 1)
path = path + expires
mac := hmac.New(sha1.New, []byte(ik.PrivateKey))
mac.Write([]byte(path))
signature := hex.EncodeToString(mac.Sum(nil))
if strings.Contains(resultURL, "?") {
resultURL = resultURL + "&" + fmt.Sprintf("ik-t=%s&ik-s=%s", expires, signature)
} else {
resultURL = resultURL + "?" + fmt.Sprintf("ik-t=%s&ik-s=%s", expires, signature)
}
}
return resultURL, nil
}
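
When Signed is set, URL appends the expiry timestamp and an HMAC-SHA1 of the endpoint-relative path plus that timestamp as the ik-t and ik-s query parameters. A hypothetical call showing this (the endpoint, key and asset URL are placeholders; URL only reads URLEndpoint and PrivateKey from the struct):

```
package main

import (
    "fmt"
    "log"

    "github.com/rclone/rclone/backend/imagekit/client"
)

func main() {
    // URL() only reads URLEndpoint and PrivateKey, so a bare struct is enough here.
    ik := &client.ImageKit{
        URLEndpoint: "https://ik.imagekit.io/your_id", // hypothetical endpoint
        PrivateKey:  "private_xxx",                    // hypothetical key
    }
    u, err := ik.URL(client.URLParam{
        Src:           "https://ik.imagekit.io/your_id/sample.jpg", // hypothetical asset
        Signed:        true,
        ExpireSeconds: 600, // link valid for 10 minutes
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(u) // ...sample.jpg?ik-t=<expiry>&ik-s=<hex hmac-sha1>
}
```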

View File

@@ -0,0 +1,828 @@
// Package imagekit provides an interface to the ImageKit.io media library.
package imagekit
import (
"context"
"errors"
"fmt"
"io"
"math"
"net/http"
"path"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/backend/imagekit/client"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/version"
)
const (
minSleep = 1 * time.Millisecond
maxSleep = 100 * time.Millisecond
decayConstant = 2
)
var systemMetadataInfo = map[string]fs.MetadataHelp{
"btime": {
Help: "Time of file birth (creation) read from Last-Modified header",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
ReadOnly: true,
},
"size": {
Help: "Size of the object in bytes",
Type: "int64",
ReadOnly: true,
},
"file-type": {
Help: "Type of the file",
Type: "string",
Example: "image",
ReadOnly: true,
},
"height": {
Help: "Height of the image or video in pixels",
Type: "int",
ReadOnly: true,
},
"width": {
Help: "Width of the image or video in pixels",
Type: "int",
ReadOnly: true,
},
"has-alpha": {
Help: "Whether the image has alpha channel or not",
Type: "bool",
ReadOnly: true,
},
"tags": {
Help: "Tags associated with the file",
Type: "string",
Example: "tag1,tag2",
ReadOnly: true,
},
"google-tags": {
Help: "AI generated tags by Google Cloud Vision associated with the image",
Type: "string",
Example: "tag1,tag2",
ReadOnly: true,
},
"aws-tags": {
Help: "AI generated tags by AWS Rekognition associated with the image",
Type: "string",
Example: "tag1,tag2",
ReadOnly: true,
},
"is-private-file": {
Help: "Whether the file is private or not",
Type: "bool",
ReadOnly: true,
},
"custom-coordinates": {
Help: "Custom coordinates of the file",
Type: "string",
Example: "0,0,100,100",
ReadOnly: true,
},
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "imagekit",
Description: "ImageKit.io",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{
{
Name: "endpoint",
Help: "You can find your ImageKit.io URL endpoint in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)",
Required: true,
},
{
Name: "public_key",
Help: "You can find your ImageKit.io public key in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)",
Required: true,
Sensitive: true,
},
{
Name: "private_key",
Help: "You can find your ImageKit.io private key in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)",
Required: true,
Sensitive: true,
},
{
Name: "only_signed",
Help: "If you have configured `Restrict unsigned image URLs` in your dashboard settings, set this to true.",
Default: false,
Advanced: true,
},
{
Name: "versions",
Help: "Include old versions in directory listings.",
Default: false,
Advanced: true,
},
{
Name: "upload_tags",
Help: "Tags to add to the uploaded files, e.g. \"tag1,tag2\".",
Default: "",
Advanced: true,
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.EncodeZero |
encoder.EncodeSlash |
encoder.EncodeQuestion |
encoder.EncodeHashPercent |
encoder.EncodeCtl |
encoder.EncodeDel |
encoder.EncodeDot |
encoder.EncodeDoubleQuote |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeDollar |
encoder.EncodeLtGt |
encoder.EncodeSquareBracket |
encoder.EncodeInvalidUtf8),
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"endpoint"`
PublicKey string `config:"public_key"`
PrivateKey string `config:"private_key"`
OnlySigned bool `config:"only_signed"`
Versions bool `config:"versions"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote to ImageKit
type Fs struct {
name string // name of remote
root string // root path
opt Options // parsed options
features *fs.Features // optional features
ik *client.ImageKit // ImageKit client
pacer *fs.Pacer // pacer for API calls
}
// Object describes a ImageKit file
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path
filePath string // The path to the file
contentType string // The content type of the object if known - may be ""
timestamp time.Time // The timestamp of the object if known - may be zero
file client.File // The media file if known - may be nil
versionID string // If present this points to an object version
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
ik, err := client.New(ctx, client.NewParams{
URLEndpoint: opt.Endpoint,
PublicKey: opt.PublicKey,
PrivateKey: opt.PrivateKey,
})
if err != nil {
return nil, err
}
f := &Fs{
name: name,
opt: *opt,
ik: ik,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.root = path.Join("/", root)
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: false,
CanHaveEmptyDirectories: true,
BucketBased: false,
ServerSideAcrossConfigs: false,
IsLocal: false,
SlowHash: true,
ReadMetadata: true,
WriteMetadata: false,
UserMetadata: false,
FilterAware: true,
PartialUploads: false,
NoMultiThreading: false,
}).Fill(ctx, f)
if f.root != "/" {
r := f.root
folderPath := f.EncodePath(r[:strings.LastIndex(r, "/")+1])
fileName := f.EncodeFileName(r[strings.LastIndex(r, "/")+1:])
file := f.getFileByName(ctx, folderPath, fileName)
if file != nil {
newRoot := path.Dir(f.root)
f.root = newRoot
return f, fs.ErrorIsFile
}
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return strings.TrimLeft(f.root, "/")
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("FS imagekit: %s", f.root)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash types of the filesystem.
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet()
}
// Features returns the optional features of this Fs.
func (f *Fs) Features() *fs.Features {
return f.features
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
remote := path.Join(f.root, dir)
remote = f.EncodePath(remote)
if remote != "/" {
parentFolderPath, folderName := path.Split(remote)
folderExists, err := f.getFolderByName(ctx, parentFolderPath, folderName)
if err != nil {
return make(fs.DirEntries, 0), err
}
if folderExists == nil {
return make(fs.DirEntries, 0), fs.ErrorDirNotFound
}
}
folders, folderError := f.getFolders(ctx, remote)
if folderError != nil {
return make(fs.DirEntries, 0), folderError
}
files, fileError := f.getFiles(ctx, remote, f.opt.Versions)
if fileError != nil {
return make(fs.DirEntries, 0), fileError
}
res := make([]fs.DirEntry, 0, len(folders)+len(files))
for _, folder := range folders {
folderPath := f.DecodePath(strings.TrimLeft(strings.Replace(folder.FolderPath, f.EncodePath(f.root), "", 1), "/"))
res = append(res, fs.NewDir(folderPath, folder.UpdatedAt))
}
for _, file := range files {
res = append(res, f.newObject(ctx, remote, file))
}
return res, nil
}
func (f *Fs) newObject(ctx context.Context, remote string, file client.File) *Object {
remoteFile := strings.TrimLeft(strings.Replace(file.FilePath, f.EncodePath(f.root), "", 1), "/")
folderPath, fileName := path.Split(remoteFile)
folderPath = f.DecodePath(folderPath)
fileName = f.DecodeFileName(fileName)
remoteFile = path.Join(folderPath, fileName)
if file.Type == "file-version" {
remoteFile = version.Add(remoteFile, file.UpdatedAt)
return &Object{
fs: f,
remote: remoteFile,
filePath: file.FilePath,
contentType: file.Mime,
timestamp: file.UpdatedAt,
file: file,
versionID: file.VersionInfo["id"],
}
}
return &Object{
fs: f,
remote: remoteFile,
filePath: file.FilePath,
contentType: file.Mime,
timestamp: file.UpdatedAt,
file: file,
}
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
//
// If remote points to a directory then it should return
// ErrorIsDir if possible without doing any extra work,
// otherwise ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
r := path.Join(f.root, remote)
folderPath, fileName := path.Split(r)
folderPath = f.EncodePath(folderPath)
fileName = f.EncodeFileName(fileName)
isFolder, err := f.getFolderByName(ctx, folderPath, fileName)
if err != nil {
return nil, err
}
if isFolder != nil {
return nil, fs.ErrorIsDir
}
file := f.getFileByName(ctx, folderPath, fileName)
if file == nil {
return nil, fs.ErrorObjectNotFound
}
return f.newObject(ctx, r, *file), nil
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return uploadFile(ctx, f, in, src.Remote(), options...)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
remote := path.Join(f.root, dir)
parentFolderPath, folderName := path.Split(remote)
parentFolderPath = f.EncodePath(parentFolderPath)
folderName = f.EncodeFileName(folderName)
err = f.pacer.Call(func() (bool, error) {
var res *http.Response
res, err = f.ik.CreateFolder(ctx, client.CreateFolderParam{
ParentFolderPath: parentFolderPath,
FolderName: folderName,
})
return f.shouldRetry(ctx, res, err)
})
return err
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
entries, err := f.List(ctx, dir)
if err != nil {
return err
}
if len(entries) > 0 {
return errors.New("directory is not empty")
}
err = f.pacer.Call(func() (bool, error) {
var res *http.Response
res, err = f.ik.DeleteFolder(ctx, client.DeleteFolderParam{
FolderPath: f.EncodePath(path.Join(f.root, dir)),
})
if res != nil && res.StatusCode == http.StatusNotFound {
return false, fs.ErrorDirNotFound
}
return f.shouldRetry(ctx, res, err)
})
return err
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
remote := path.Join(f.root, dir)
err = f.pacer.Call(func() (bool, error) {
var res *http.Response
res, err = f.ik.DeleteFolder(ctx, client.DeleteFolderParam{
FolderPath: f.EncodePath(remote),
})
if res != nil && res.StatusCode == http.StatusNotFound {
return false, fs.ErrorDirNotFound
}
return f.shouldRetry(ctx, res, err)
})
return err
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
duration := time.Duration(math.Abs(float64(expire)))
expireSeconds := duration.Seconds()
fileRemote := path.Join(f.root, remote)
folderPath, fileName := path.Split(fileRemote)
folderPath = f.EncodePath(folderPath)
fileName = f.EncodeFileName(fileName)
file := f.getFileByName(ctx, folderPath, fileName)
if file == nil {
return "", fs.ErrorObjectNotFound
}
// Pacer not needed as this doesn't use the API
url, err := f.ik.URL(client.URLParam{
Src: file.URL,
Signed: *file.IsPrivateFile || f.opt.OnlySigned,
ExpireSeconds: int64(expireSeconds),
QueryParameters: map[string]string{
"updatedAt": file.UpdatedAt.String(),
},
})
if err != nil {
return "", err
}
return url, nil
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// String returns a description of the Object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.file.Name
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(context.Context) time.Time {
return o.file.UpdatedAt
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return int64(o.file.Size)
}
// MimeType returns the MIME type of the file
func (o *Object) MimeType(context.Context) string {
return o.contentType
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
// Offset and Count for range download
var offset int64
var count int64
fs.FixRangeOption(options, -1)
partialContent := false
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(-1)
partialContent = true
case *fs.SeekOption:
offset = x.Offset
partialContent = true
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
// Pacer not needed as this doesn't use the API
url, err := o.fs.ik.URL(client.URLParam{
Src: o.file.URL,
Signed: *o.file.IsPrivateFile || o.fs.opt.OnlySigned,
QueryParameters: map[string]string{
"tr": "orig-true",
"updatedAt": o.file.UpdatedAt.String(),
},
})
if err != nil {
return nil, err
}
httpClient := &http.Client{}
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
if err != nil {
return nil, err
}
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+count-1))
resp, err := httpClient.Do(req)
if err != nil {
return nil, err
}
end := resp.ContentLength
if partialContent && resp.StatusCode == http.StatusOK {
skip := offset
if offset < 0 {
skip = end + offset + 1
}
_, err = io.CopyN(io.Discard, resp.Body, skip)
if err != nil {
if resp != nil {
_ = resp.Body.Close()
}
return nil, err
}
return readers.NewLimitedReadCloser(resp.Body, end-skip), nil
}
return resp.Body, nil
}
// Update in to the object with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
srcRemote := o.Remote()
remote := path.Join(o.fs.root, srcRemote)
folderPath, fileName := path.Split(remote)
UseUniqueFileName := new(bool)
*UseUniqueFileName = false
var resp *client.UploadResult
err = o.fs.pacer.Call(func() (bool, error) {
var res *http.Response
res, resp, err = o.fs.ik.Upload(ctx, in, client.UploadParam{
FileName: fileName,
Folder: folderPath,
IsPrivateFile: o.file.IsPrivateFile,
})
return o.fs.shouldRetry(ctx, res, err)
})
if err != nil {
return err
}
fileID := resp.FileID
_, file, err := o.fs.ik.File(ctx, fileID)
if err != nil {
return err
}
o.file = *file
return nil
}
// Remove this object
func (o *Object) Remove(ctx context.Context) (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
var res *http.Response
res, err = o.fs.ik.DeleteFile(ctx, o.file.FileID)
return o.fs.shouldRetry(ctx, res, err)
})
return err
}
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
return fs.ErrorCantSetModTime
}
func uploadFile(ctx context.Context, f *Fs, in io.Reader, srcRemote string, options ...fs.OpenOption) (fs.Object, error) {
remote := path.Join(f.root, srcRemote)
folderPath, fileName := path.Split(remote)
folderPath = f.EncodePath(folderPath)
fileName = f.EncodeFileName(fileName)
UseUniqueFileName := new(bool)
*UseUniqueFileName = false
err := f.pacer.Call(func() (bool, error) {
var res *http.Response
var err error
res, _, err = f.ik.Upload(ctx, in, client.UploadParam{
FileName: fileName,
Folder: folderPath,
IsPrivateFile: &f.opt.OnlySigned,
})
return f.shouldRetry(ctx, res, err)
})
if err != nil {
return nil, err
}
return f.NewObject(ctx, srcRemote)
}
// Metadata returns the metadata for the object
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
metadata.Set("btime", o.file.CreatedAt.Format(time.RFC3339))
metadata.Set("size", strconv.FormatUint(o.file.Size, 10))
metadata.Set("file-type", o.file.FileType)
metadata.Set("height", strconv.Itoa(o.file.Height))
metadata.Set("width", strconv.Itoa(o.file.Width))
metadata.Set("has-alpha", strconv.FormatBool(o.file.HasAlpha))
for k, v := range o.file.EmbeddedMetadata {
metadata.Set(k, fmt.Sprint(v))
}
if o.file.Tags != nil {
metadata.Set("tags", strings.Join(o.file.Tags, ","))
}
if o.file.CustomCoordinates != nil {
metadata.Set("custom-coordinates", *o.file.CustomCoordinates)
}
if o.file.IsPrivateFile != nil {
metadata.Set("is-private-file", strconv.FormatBool(*o.file.IsPrivateFile))
}
if o.file.AITags != nil {
googleTags := []string{}
awsTags := []string{}
for _, tag := range o.file.AITags {
if tag.Source == "google-auto-tagging" {
googleTags = append(googleTags, tag.Name)
} else if tag.Source == "aws-auto-tagging" {
awsTags = append(awsTags, tag.Name)
}
}
if len(googleTags) > 0 {
metadata.Set("google-tags", strings.Join(googleTags, ","))
}
if len(awsTags) > 0 {
metadata.Set("aws-tags", strings.Join(awsTags, ","))
}
}
return metadata, nil
}
// Copy src to this remote by reading the source object and uploading it again.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantCopy
}
file, err := srcObj.Open(ctx)
if err != nil {
return nil, err
}
defer func() {
_ = file.Close()
}()
return uploadFile(ctx, f, file, remote)
}
// Check the interfaces are satisfied.
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.Object = &Object{}
_ fs.Copier = &Fs{}
)
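
The implementation above plugs into rclone's generic fs layer, so it can be exercised through the same Go API every backend shares. The following is a minimal sketch only, assuming a remote named "TestImageKit:" already exists in rclone.conf; the remote name and the "some/file.jpg" path are illustrative and not part of this change:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	_ "github.com/rclone/rclone/backend/imagekit" // register the backend via its init()
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configfile"
)

func main() {
	ctx := context.Background()
	configfile.Install() // load the default rclone.conf

	// Resolve the configured remote to an fs.Fs
	f, err := fs.NewFs(ctx, "TestImageKit:")
	if err != nil {
		log.Fatal(err)
	}

	// List the root of the remote
	entries, err := f.List(ctx, "")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Remote())
	}

	// Ask for a public link if the backend advertises the feature
	if do := f.Features().PublicLink; do != nil {
		link, err := do(ctx, "some/file.jpg", fs.Duration(24*time.Hour), false)
		if err == nil {
			fmt.Println(link)
		}
	}
}
```

fs.NewFs resolves the remote name against the loaded config and returns the Fs registered by this package's init(), which is also how the integration test below reaches it.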


@@ -0,0 +1,18 @@
package imagekit
import (
"testing"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
func TestIntegration(t *testing.T) {
debug := true
fstest.Verbose = &debug
fstests.Run(t, &fstests.Opt{
RemoteName: "TestImageKit:",
NilObject: (*Object)(nil),
SkipFsCheckWrap: true,
})
}

backend/imagekit/util.go

@@ -0,0 +1,193 @@
package imagekit
import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/rclone/rclone/backend/imagekit/client"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/pacer"
)
func (f *Fs) getFiles(ctx context.Context, path string, includeVersions bool) (files []client.File, err error) {
files = make([]client.File, 0)
var hasMore = true
for hasMore {
err = f.pacer.Call(func() (bool, error) {
var data *[]client.File
var res *http.Response
res, data, err = f.ik.Files(ctx, client.FilesOrFolderParam{
Skip: len(files),
Limit: 100,
Path: path,
}, includeVersions)
if err != nil || data == nil {
hasMore = false
return f.shouldRetry(ctx, res, err)
}
hasMore = len(*data) == 100
if len(*data) > 0 {
files = append(files, *data...)
}
return f.shouldRetry(ctx, res, err)
})
}
if err != nil {
return make([]client.File, 0), err
}
return files, nil
}
func (f *Fs) getFolders(ctx context.Context, path string) (folders []client.Folder, err error) {
folders = make([]client.Folder, 0)
var hasMore = true
for hasMore {
err = f.pacer.Call(func() (bool, error) {
var data *[]client.Folder
var res *http.Response
res, data, err = f.ik.Folders(ctx, client.FilesOrFolderParam{
Skip: len(folders),
Limit: 100,
Path: path,
})
if err != nil || data == nil {
hasMore = false
return f.shouldRetry(ctx, res, err)
}
hasMore = len(*data) == 100
if len(*data) > 0 {
folders = append(folders, *data...)
}
return f.shouldRetry(ctx, res, err)
})
}
if err != nil {
return make([]client.Folder, 0), err
}
return folders, nil
}
func (f *Fs) getFileByName(ctx context.Context, path string, name string) (file *client.File) {
err := f.pacer.Call(func() (bool, error) {
res, data, err := f.ik.Files(ctx, client.FilesOrFolderParam{
Limit: 1,
Path: path,
SearchQuery: fmt.Sprintf(`type = "file" AND name = %s`, strconv.Quote(name)),
}, false)
if err != nil || data == nil || len(*data) == 0 {
file = nil
} else {
file = &(*data)[0]
}
return f.shouldRetry(ctx, res, err)
})
if err != nil {
return nil
}
return file
}
func (f *Fs) getFolderByName(ctx context.Context, path string, name string) (folder *client.Folder, err error) {
err = f.pacer.Call(func() (bool, error) {
res, data, err := f.ik.Folders(ctx, client.FilesOrFolderParam{
Limit: 1,
Path: path,
SearchQuery: fmt.Sprintf(`type = "folder" AND name = %s`, strconv.Quote(name)),
})
if err != nil || data == nil || len(*data) == 0 {
folder = nil
} else {
folder = &(*data)[0]
}
return f.shouldRetry(ctx, res, err)
})
if err != nil {
return nil, err
}
return folder, nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (e.g. "Token has expired")
408, // Request Timeout
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
503, // Service Unavailable
504, // Gateway Time-out
}
func shouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
if resp == nil {
return false
}
for _, e := range retryErrorCodes {
if resp.StatusCode == e {
return true
}
}
return false
}
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if resp != nil && (resp.StatusCode == 429 || resp.StatusCode == 503) {
var retryAfter = 1
retryAfterString := resp.Header.Get("X-RateLimit-Reset")
if retryAfterString != "" {
var err error
retryAfter, err = strconv.Atoi(retryAfterString)
if err != nil {
fs.Errorf(f, "Malformed %s header %q: %v", "X-RateLimit-Reset", retryAfterString, err)
}
}
return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Millisecond)
}
return fserrors.ShouldRetry(err) || shouldRetryHTTP(resp, retryErrorCodes), err
}
// EncodePath encapsulates the logic for encoding a path
func (f *Fs) EncodePath(str string) string {
return f.opt.Enc.FromStandardPath(str)
}
// DecodePath encapsulates the logic for decoding a path
func (f *Fs) DecodePath(str string) string {
return f.opt.Enc.ToStandardPath(str)
}
// EncodeFileName encapsulates the logic for encoding a file name
func (f *Fs) EncodeFileName(str string) string {
return f.opt.Enc.FromStandardName(str)
}
// DecodeFileName encapsulates the logic for decoding a file name
func (f *Fs) DecodeFileName(str string) string {
return f.opt.Enc.ToStandardName(str)
}
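
util.go above funnels every API call through f.pacer.Call and classifies failures with shouldRetry/shouldRetryHTTP. The same pattern can be shown in isolation; this is a sketch only, with a placeholder URL and status list rather than the ImageKit client:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/pacer"
)

// retryable reports whether an HTTP status is worth retrying -
// the same idea as shouldRetryHTTP above.
func retryable(resp *http.Response, codes []int) bool {
	if resp == nil {
		return false
	}
	for _, c := range codes {
		if resp.StatusCode == c {
			return true
		}
	}
	return false
}

func main() {
	ctx := context.Background()
	// Same construction style as the backends in this change set
	p := fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(10*time.Millisecond), pacer.MaxSleep(2*time.Second)))

	codes := []int{429, 500, 503}
	err := p.Call(func() (bool, error) {
		resp, err := http.Get("https://example.com/") // placeholder request
		if err != nil {
			return false, err
		}
		defer func() { _ = resp.Body.Close() }()
		// true tells the pacer to back off and try again
		return retryable(resp, codes), nil
	})
	fmt.Println("result:", err)
}
```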


@@ -133,11 +133,13 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
},
Options: []fs.Option{{
Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
Sensitive: true,
}, {
Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
Sensitive: true,
}, {
// their official client (https://github.com/jjjake/internetarchive) hardcodes the following two
Name: "endpoint",
@@ -800,7 +802,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
}
var mdata fs.Metadata
mdata, err = fs.GetMetadataOptions(ctx, src, options)
mdata, err = fs.GetMetadataOptions(ctx, o.fs, src, options)
if err == nil && mdata != nil {
for mk, mv := range mdata {
mk = strings.ToLower(mk)


@@ -395,7 +395,7 @@ type JottaFile struct {
State string `xml:"currentRevision>state"`
CreatedAt JottaTime `xml:"currentRevision>created"`
ModifiedAt JottaTime `xml:"currentRevision>modified"`
Updated JottaTime `xml:"currentRevision>updated"`
UpdatedAt JottaTime `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"`


@@ -67,9 +67,13 @@ const (
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
legacyConfigVersion = 0
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaCloudClientID = "desktop"
teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaseCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaseCloudClientID = "desktop"
telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
telianoCloudAuthURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
telianoCloudClientID = "desktop"
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
@@ -88,7 +92,34 @@ func init() {
Description: "Jottacloud",
NewFs: NewFs,
Config: Config,
Options: []fs.Option{{
MetadataInfo: &fs.MetadataInfo{
Help: `Jottacloud has limited support for metadata, currently an extended set of timestamps.`,
System: map[string]fs.MetadataHelp{
"btime": {
Help: "Time of file birth (creation), read from rclone metadata",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
"mtime": {
Help: "Time of last modification, read from rclone metadata",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
"utime": {
Help: "Time of last upload, when current revision was created, generated by backend",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
ReadOnly: true,
},
"content-type": {
Help: "MIME type, also known as media type",
Type: "string",
Example: "text/plain",
ReadOnly: true,
},
},
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -123,7 +154,7 @@ func init() {
Default: (encoder.Display |
encoder.EncodeWin | // :?"*<>|
encoder.EncodeInvalidUtf8),
}},
}}...),
})
}
@@ -138,8 +169,11 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
Value: "legacy",
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
}, {
Value: "telia",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
Value: "telia_se",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
}, {
Value: "telia_no",
Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
}, {
Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
@@ -238,17 +272,32 @@ machines.`)
return nil, fmt.Errorf("error while saving token: %w", err)
}
return fs.ConfigGoto("choose_device")
case "telia": // telia cloud config
case "telia_se": // telia_se cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaCloudTokenURL)
m.Set(configClientID, teliaseCloudClientID)
m.Set(configTokenURL, teliaseCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaCloudAuthURL,
TokenURL: teliaCloudTokenURL,
AuthURL: teliaseCloudAuthURL,
TokenURL: teliaseCloudTokenURL,
},
ClientID: teliaCloudClientID,
ClientID: teliaseCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "telia_no": // telia_no cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, telianoCloudClientID)
m.Set(configTokenURL, telianoCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
},
ClientID: telianoCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
@@ -475,7 +524,9 @@ type Object struct {
remote string
hasMetaData bool
size int64
createTime time.Time
modTime time.Time
updateTime time.Time
md5 string
mimeType string
}
@@ -950,6 +1001,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
CanHaveEmptyDirectories: true,
ReadMimeType: true,
WriteMimeType: false,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: false,
}).Fill(ctx, f)
f.jfsSrv.SetErrorHandler(errorHandler)
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
@@ -1136,6 +1190,7 @@ func parseListRStream(ctx context.Context, r io.Reader, filesystem *Fs, callback
remote: filesystem.opt.Enc.ToStandardPath(path.Join(f.Path, f.Name)),
size: f.Size,
md5: f.Checksum,
createTime: time.Time(f.Created),
modTime: time.Time(f.Modified),
})
}
@@ -1365,7 +1420,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// is currently in trash, but can be made to match, it will be
// restored. Returns ErrorObjectNotFound if upload will be necessary
// to get a matching remote file.
func (f *Fs) createOrUpdate(ctx context.Context, file string, modTime time.Time, size int64, md5 string) (info *api.JottaFile, err error) {
func (f *Fs) createOrUpdate(ctx context.Context, file string, createTime time.Time, modTime time.Time, size int64, md5 string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: f.filePath(file),
@@ -1375,11 +1430,10 @@ func (f *Fs) createOrUpdate(ctx context.Context, file string, modTime time.Time,
opts.Parameters.Set("cphash", "true")
fileDate := api.JottaTime(modTime).String()
opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
opts.ExtraHeaders["JMd5"] = md5
opts.ExtraHeaders["JCreated"] = fileDate
opts.ExtraHeaders["JModified"] = fileDate
opts.ExtraHeaders["JCreated"] = api.JottaTime(createTime).String()
opts.ExtraHeaders["JModified"] = api.JottaTime(modTime).String()
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
@@ -1442,7 +1496,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
info, err = f.createOrUpdate(ctx, remote, srcObj.createTime, srcObj.modTime, srcObj.size, srcObj.md5)
}
if err != nil {
@@ -1705,7 +1759,9 @@ func (o *Object) setMetaData(info *api.JottaFile) (err error) {
o.size = info.Size
o.md5 = info.MD5
o.mimeType = info.MimeType
o.createTime = time.Time(info.CreatedAt)
o.modTime = time.Time(info.ModifiedAt)
o.updateTime = time.Time(info.UpdatedAt)
return nil
}
@@ -1750,7 +1806,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// (note that if size/md5 does not match, the file content will
// also be modified if deduplication is possible, i.e. it is
// important to use correct/latest values)
_, err = o.fs.createOrUpdate(ctx, o.remote, modTime, o.size, o.md5)
_, err = o.fs.createOrUpdate(ctx, o.remote, o.createTime, modTime, o.size, o.md5)
if err != nil {
if err == fs.ErrorObjectNotFound {
// file was modified (size/md5 changed) between readMetaData and createOrUpdate?
@@ -1887,6 +1943,37 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Wrap the accounting back onto the stream
in = wrap(in)
}
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
if err != nil {
return fmt.Errorf("failed to read metadata from source object: %w", err)
}
var createdTime string
var modTime string
if meta != nil {
if v, ok := meta["btime"]; ok {
t, err := time.Parse(time.RFC3339Nano, v) // metadata stores RFC3339Nano timestamps
if err != nil {
fs.Debugf(o, "failed to parse metadata btime: %q: %v", v, err)
} else {
createdTime = api.Rfc3339Time(t).String() // jottacloud api wants RFC3339 timestamps
}
}
if v, ok := meta["mtime"]; ok {
t, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata mtime: %q: %v", v, err)
} else {
modTime = api.Rfc3339Time(t).String()
}
}
}
if modTime == "" { // prefer mtime in meta as Modified time, fallback to source ModTime
modTime = api.Rfc3339Time(src.ModTime(ctx)).String()
}
if createdTime == "" { // if no Created time set same as Modified
createdTime = modTime
}
// use the api to allocate the file first and get resume / deduplication info
var resp *http.Response
@@ -1896,13 +1983,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Options: options,
ExtraHeaders: make(map[string]string),
}
fileDate := api.Rfc3339Time(src.ModTime(ctx)).String()
// the allocate request
var request = api.AllocateFileRequest{
Bytes: size,
Created: fileDate,
Modified: fileDate,
Created: createdTime,
Modified: modTime,
Md5: md5String,
Path: o.fs.allocatePathRaw(o.remote, true),
}
@@ -1917,7 +2003,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// If the file state is INCOMPLETE and CORRUPT, try to upload a then
// If the file state is INCOMPLETE and CORRUPT, we must upload it.
// Else, if the file state is COMPLETE, we don't need to upload it because
// the content is already there, possibly it was created with deduplication,
// and also any metadata changes are already performed by the allocate request.
if response.State != "COMPLETED" {
// how much do we still have to upload?
remainingBytes := size - response.ResumePos
@@ -1941,22 +2030,18 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// send the remaining bytes
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, nil, &result)
_, err = o.fs.apiSrv.CallJSON(ctx, &opts, nil, &result)
if err != nil {
return err
}
// finally update the meta data
o.hasMetaData = true
o.size = result.Bytes
o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0)
} else {
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
return o.readMetaData(ctx, true)
// Upload response contains main metadata properties (size, md5 and modTime)
// which could be set back to the object, but it does not contain the
// necessary information to set the createTime and updateTime properties,
// so must therefore perform a read instead.
}
return nil
// in any case we must update the object meta data
return o.readMetaData(ctx, true)
}
func (o *Object) remove(ctx context.Context, hard bool) error {
@@ -1991,6 +2076,22 @@ func (o *Object) Remove(ctx context.Context) error {
return o.remove(ctx, o.fs.opt.HardDelete)
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
err = o.readMetaData(ctx, false)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return nil, err
}
metadata.Set("btime", o.createTime.Format(time.RFC3339Nano)) // metadata timestamps should be RFC3339Nano
metadata.Set("mtime", o.modTime.Format(time.RFC3339Nano))
metadata.Set("utime", o.updateTime.Format(time.RFC3339Nano))
metadata.Set("content-type", o.mimeType)
return metadata, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -2005,4 +2106,5 @@ var (
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.Metadataer = (*Object)(nil)
)
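
The Update and Metadata changes above move timestamps between rclone metadata (RFC 3339 with nanoseconds) and the Jottacloud allocate request (plain RFC 3339 via api.Rfc3339Time). A standalone sketch of that conversion using only the standard library; the fallback rules mirror the ones in Update and the values are illustrative:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// rclone metadata stores timestamps as RFC 3339 with nanoseconds
	meta := map[string]string{
		"btime": "2009-05-06T04:05:06.499999999Z",
		"mtime": "2010-06-07T08:09:07.599999999Z",
	}

	toAPI := func(s string) string {
		t, err := time.Parse(time.RFC3339Nano, s)
		if err != nil {
			return "" // caller falls back to another source
		}
		// the Jottacloud API wants plain RFC 3339 - an approximation of api.Rfc3339Time
		return t.UTC().Format(time.RFC3339)
	}

	created, modified := toAPI(meta["btime"]), toAPI(meta["mtime"])
	if modified == "" {
		modified = time.Now().UTC().Format(time.RFC3339) // fall back to the source ModTime, as in Update above
	}
	if created == "" {
		created = modified // if no Created time, use Modified
	}
	fmt.Println("Created:", created, "Modified:", modified)
}
```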


@@ -1,11 +1,17 @@
package jottacloud
import (
"context"
"crypto/md5"
"fmt"
"io"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -40,3 +46,48 @@ func TestReadMD5(t *testing.T) {
})
}
}
func (f *Fs) InternalTestMetadata(t *testing.T) {
ctx := context.Background()
contents := random.String(1000)
item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
utime := time.Now()
metadata := fs.Metadata{
"btime": "2009-05-06T04:05:06.499999999Z",
"mtime": "2010-06-07T08:09:07.599999999Z",
//"utime" - read-only
//"content-type" - read-only
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, "text/html", metadata)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
o := obj.(*Object)
gotMetadata, err := o.Metadata(ctx)
require.NoError(t, err)
for k, v := range metadata {
got := gotMetadata[k]
switch k {
case "btime":
assert.True(t, fstest.Time(v).Truncate(f.Precision()).Equal(fstest.Time(got)), fmt.Sprintf("btime not equal want %v got %v", v, got))
case "mtime":
assert.True(t, fstest.Time(v).Truncate(f.Precision()).Equal(fstest.Time(got)), fmt.Sprintf("mtime not equal want %v got %v", v, got))
case "utime":
gotUtime := fstest.Time(got)
dt := gotUtime.Sub(utime)
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("utime more than 1 minute out want %v got %v delta %v", utime, gotUtime, dt))
assert.True(t, fstest.Time(v).Equal(fstest.Time(got)))
case "content-type":
assert.True(t, o.MimeType(ctx) == got)
default:
assert.Equal(t, v, got, k)
}
}
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata)
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -61,9 +61,10 @@ func init() {
Default: true,
Advanced: true,
}, {
Name: "user",
Help: "Your user name.",
Required: true,
Name: "user",
Help: "Your user name.",
Required: true,
Sensitive: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",

backend/linkbox/linkbox.go

@@ -0,0 +1,897 @@
// Package linkbox provides an interface to the linkbox.to Cloud storage system.
//
// API docs: https://www.linkbox.to/api-docs
package linkbox
/*
Extras
- PublicLink - NO - sharing doesn't share the actual file, only a page with it on
- Move - YES - have Move and Rename file APIs so it is possible
- MoveDir - NO - probably not possible - have Move but no Rename
*/
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"io"
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
maxEntitiesPerPage = 1024
minSleep = 200 * time.Millisecond
maxSleep = 2 * time.Second
pacerBurst = 1
linkboxAPIURL = "https://www.linkbox.to/api/open/"
rootID = "0" // ID of root directory
)
func init() {
fsi := &fs.RegInfo{
Name: "linkbox",
Description: "Linkbox",
NewFs: NewFs,
Options: []fs.Option{{
Name: "token",
Help: "Token from https://www.linkbox.to/admin/account",
Sensitive: true,
Required: true,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Token string `config:"token"`
}
// Fs stores the interface to the remote Linkbox files
type Fs struct {
name string
root string
opt Options // options for this backend
features *fs.Features // optional features
ci *fs.ConfigInfo // global config
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer
}
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
contentType string
fullURL string
dirID int64
itemID string // item ID - used for file operations (e.g. file_del)
id int64 // entity ID - used for directory operations (e.g. folder_del)
isDir bool
}
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
root = strings.Trim(root, "/")
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,
root: root,
ci: ci,
srv: rest.NewClient(fshttp.NewClient(ctx)),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep))),
}
f.dirCache = dircache.New(root, rootID, f)
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
CaseInsensitive: true,
}).Fill(ctx, f)
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
return f, nil
}
return nil, err
}
f.features.Fill(ctx, &tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
type entity struct {
Type string `json:"type"`
Name string `json:"name"`
URL string `json:"url"`
Ctime int64 `json:"ctime"`
Size int64 `json:"size"`
ID int64 `json:"id"`
Pid int64 `json:"pid"`
ItemID string `json:"item_id"`
}
// Return true if the entity is a directory
func (e *entity) isDir() bool {
return e.Type == "dir" || e.Type == "sdir"
}
type data struct {
Entities []entity `json:"list"`
}
type fileSearchRes struct {
response
SearchData data `json:"data"`
}
// Set an object info from an entity
func (o *Object) set(e *entity) {
o.modTime = time.Unix(e.Ctime, 0)
o.contentType = e.Type
o.size = e.Size
o.fullURL = e.URL
o.isDir = e.isDir()
o.id = e.ID
o.itemID = e.ItemID
o.dirID = e.Pid
}
// Call linkbox with the query in opts and return result
//
// This will be checked for error and an error will be returned if Status != 1
func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result interface{}) error {
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
responser := result.(responser)
if responser.IsError() {
return responser
}
return nil
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*entity) bool
// Search is a bit fussy about which characters match
//
// If the name doesn't match this then do a dir list instead
var searchOK = regexp.MustCompile(`^[a-zA-Z0-9_ .]+$`)
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// If name is set then the search ignores dirID and matches name as a
// substring, so name="dir" also matches "sub dir". The results are
// therefore filtered afterwards so only items with parent dirID are returned.
func (f *Fs) listAll(ctx context.Context, dirID string, name string, fn listAllFn) (found bool, err error) {
var (
pageNumber = 0
numberOfEntities = maxEntitiesPerPage
)
name = strings.TrimSpace(name) // search doesn't like spaces
if !searchOK.MatchString(name) {
// If name isn't good then do an unbounded search
name = ""
}
OUTER:
for numberOfEntities == maxEntitiesPerPage {
pageNumber++
opts := &rest.Opts{
Method: "GET",
RootURL: linkboxAPIURL,
Path: "file_search",
Parameters: url.Values{
"token": {f.opt.Token},
"name": {name},
"pid": {dirID},
"pageNo": {itoa(pageNumber)},
"pageSize": {itoa64(maxEntitiesPerPage)},
},
}
var responseResult fileSearchRes
err = getUnmarshaledResponse(ctx, f, opts, &responseResult)
if err != nil {
return false, fmt.Errorf("getting files failed: %w", err)
}
numberOfEntities = len(responseResult.SearchData.Entities)
for _, entity := range responseResult.SearchData.Entities {
if itoa64(entity.Pid) != dirID {
// when name != "" this returns results from all directories, so skip entries not in this one
continue
}
if fn(&entity) {
found = true
break OUTER
}
}
if pageNumber > 100000 {
return false, fmt.Errorf("too many results")
}
}
return found, nil
}
// Turn 64 bit int to string
func itoa64(i int64) string {
return strconv.FormatInt(i, 10)
}
// Turn int to string
func itoa(i int) string {
return itoa64(int64(i))
}
func splitDirAndName(remote string) (dir string, name string) {
lastSlashPosition := strings.LastIndex(remote, "/")
if lastSlashPosition == -1 {
dir = ""
name = remote
} else {
dir = remote[:lastSlashPosition]
name = remote[lastSlashPosition+1:]
}
// fs.Debugf(nil, "splitDirAndName remote = {%s}, dir = {%s}, name = {%s}", remote, dir, name)
return dir, name
}
// FindLeaf finds a directory of name leaf in the folder with ID directoryID
func (f *Fs) FindLeaf(ctx context.Context, directoryID, leaf string) (directoryIDOut string, found bool, err error) {
// Find the leaf in directoryID
found, err = f.listAll(ctx, directoryID, leaf, func(entity *entity) bool {
if entity.isDir() && strings.EqualFold(entity.Name, leaf) {
directoryIDOut = itoa64(entity.ID)
return true
}
return false
})
return directoryIDOut, found, err
}
// Returned from "folder_create"
type folderCreateRes struct {
response
Data struct {
DirID int64 `json:"dirId"`
} `json:"data"`
}
// CreateDir makes a directory with dirID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, err error) {
// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
opts := &rest.Opts{
Method: "GET",
RootURL: linkboxAPIURL,
Path: "folder_create",
Parameters: url.Values{
"token": {f.opt.Token},
"name": {leaf},
"pid": {dirID},
"isShare": {"0"},
"canInvite": {"1"},
"canShare": {"1"},
"withBodyImg": {"1"},
"desc": {""},
},
}
response := folderCreateRes{}
err = getUnmarshaledResponse(ctx, f, opts, &response)
if err != nil {
// response status 1501 means that directory already exists
if response.Status == 1501 {
return newID, fmt.Errorf("couldn't find already created directory: %w", fs.ErrorDirNotFound)
}
return newID, fmt.Errorf("CreateDir failed: %w", err)
}
if response.Data.DirID == 0 {
return newID, fmt.Errorf("API returned 0 for ID of newly created directory")
}
return itoa64(response.Data.DirID), nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// fs.Debugf(f, "List method dir = {%s}", dir)
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
_, err = f.listAll(ctx, directoryID, "", func(entity *entity) bool {
remote := path.Join(dir, entity.Name)
if entity.isDir() {
id := itoa64(entity.ID)
modTime := time.Unix(entity.Ctime, 0)
d := fs.NewDir(remote, modTime).SetID(id).SetParentID(itoa64(entity.Pid))
entries = append(entries, d)
// cache the directory ID for later lookups
f.dirCache.Put(remote, id)
} else {
o := &Object{
fs: f,
remote: remote,
}
o.set(entity)
entries = append(entries, o)
}
return false
})
if err != nil {
return nil, err
}
return entries, nil
}
// get an entity with leaf from dirID
func getEntity(ctx context.Context, f *Fs, leaf string, directoryID string, token string) (*entity, error) {
var result *entity
var resultErr = fs.ErrorObjectNotFound
_, err := f.listAll(ctx, directoryID, leaf, func(entity *entity) bool {
if strings.EqualFold(entity.Name, leaf) {
// fs.Debugf(f, "getObject found entity.Name {%s} name {%s}", entity.Name, name)
if entity.isDir() {
result = nil
resultErr = fs.ErrorIsDir
} else {
result = entity
resultErr = nil
}
return true
}
return false
})
if err != nil {
return nil, err
}
return result, resultErr
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
//
// If remote points to a directory then it should return
// ErrorIsDir if possible without doing any extra work,
// otherwise ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
leaf, dirID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
entity, err := getEntity(ctx, f, leaf, dirID, f.opt.Token)
if err != nil {
return nil, err
}
o := &Object{
fs: f,
remote: remote,
}
o.set(entity)
return o, nil
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.dirCache.FindDir(ctx, dir, true)
return err
}
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if check {
entries, err := f.List(ctx, dir)
if err != nil {
return err
}
if len(entries) != 0 {
return fs.ErrorDirectoryNotEmpty
}
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
opts := &rest.Opts{
Method: "GET",
RootURL: linkboxAPIURL,
Path: "folder_del",
Parameters: url.Values{
"token": {f.opt.Token},
"dirIds": {directoryID},
},
}
response := response{}
err = getUnmarshaledResponse(ctx, f, opts, &response)
if err != nil {
// Linkbox has some odd error returns here
if response.Status == 403 || response.Status == 500 {
return fs.ErrorDirNotFound
}
return fmt.Errorf("purge error: %w", err)
}
f.dirCache.FlushDir(dir)
if err != nil {
return err
}
return nil
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
}
// SetModTime sets modTime on a particular file
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
var res *http.Response
downloadURL := o.fullURL
if downloadURL == "" {
_, name := splitDirAndName(o.Remote())
newObject, err := getEntity(ctx, o.fs, name, itoa64(o.dirID), o.fs.opt.Token)
if err != nil {
return nil, err
}
if newObject == nil {
// fs.Debugf(o.fs, "Open entity is empty: name = {%s}", name)
return nil, fs.ErrorObjectNotFound
}
downloadURL = newObject.URL
}
opts := &rest.Opts{
Method: "GET",
RootURL: downloadURL,
Options: options,
}
err := o.fs.pacer.Call(func() (bool, error) {
var err error
res, err = o.fs.srv.Call(ctx, opts)
return o.fs.shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
return res.Body, nil
}
// Update in to the object with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
size := src.Size()
if size == 0 {
return fs.ErrorCantUploadEmptyFiles
} else if size < 0 {
return fmt.Errorf("can't upload files of unknown length")
}
remote := o.Remote()
// remove the file if it exists
if o.itemID != "" {
fs.Debugf(o, "Update: removing old file")
err = o.Remove(ctx)
if err != nil {
fs.Errorf(o, "Update: failed to remove existing file: %v", err)
}
o.itemID = ""
} else {
tmpObject, err := o.fs.NewObject(ctx, remote)
if err == nil {
fs.Debugf(o, "Update: removing old file")
err = tmpObject.Remove(ctx)
if err != nil {
fs.Errorf(o, "Update: failed to remove existing file: %v", err)
}
}
}
first10m := io.LimitReader(in, 10_485_760)
first10mBytes, err := io.ReadAll(first10m)
if err != nil {
return fmt.Errorf("Update err in reading file: %w", err)
}
// get upload authorization (step 1)
opts := &rest.Opts{
Method: "GET",
RootURL: linkboxAPIURL,
Path: "get_upload_url",
Options: options,
Parameters: url.Values{
"token": {o.fs.opt.Token},
"fileMd5ofPre10m": {fmt.Sprintf("%x", md5.Sum(first10mBytes))},
"fileSize": {itoa64(size)},
},
}
getFirstStepResult := getUploadURLResponse{}
err = getUnmarshaledResponse(ctx, o.fs, opts, &getFirstStepResult)
if err != nil {
if getFirstStepResult.Status != 600 {
return fmt.Errorf("Update err in unmarshaling response: %w", err)
}
}
switch getFirstStepResult.Status {
case 1:
// upload file using link from first step
var res *http.Response
file := io.MultiReader(bytes.NewReader(first10mBytes), in)
opts := &rest.Opts{
Method: "PUT",
RootURL: getFirstStepResult.Data.SignURL,
Options: options,
Body: file,
ContentLength: &size,
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, opts)
return o.fs.shouldRetry(ctx, res, err)
})
if err != nil {
return fmt.Errorf("update err in uploading file: %w", err)
}
_, err = io.ReadAll(res.Body)
if err != nil {
return fmt.Errorf("update err in reading response: %w", err)
}
case 600:
// Status means that we don't need to upload file
// We need only to make second step
default:
return fmt.Errorf("got unexpected message from Linkbox: %s", getFirstStepResult.Message)
}
leaf, dirID, err := o.fs.dirCache.FindPath(ctx, remote, false)
if err != nil {
return err
}
// create file item at Linkbox (second step)
opts = &rest.Opts{
Method: "GET",
RootURL: linkboxAPIURL,
Path: "folder_upload_file",
Options: options,
Parameters: url.Values{
"token": {o.fs.opt.Token},
"fileMd5ofPre10m": {fmt.Sprintf("%x", md5.Sum(first10mBytes))},
"fileSize": {itoa64(size)},
"pid": {dirID},
"diyName": {leaf},
},
}
getSecondStepResult := getUploadURLResponse{}
err = getUnmarshaledResponse(ctx, o.fs, opts, &getSecondStepResult)
if err != nil {
return fmt.Errorf("Update second step failed: %w", err)
}
// Try a few times to read the object after upload for eventual consistency
const maxTries = 10
var sleepTime = 100 * time.Millisecond
var entity *entity
for try := 1; try <= maxTries; try++ {
entity, err = getEntity(ctx, o.fs, leaf, dirID, o.fs.opt.Token)
if err == nil {
break
}
if err != fs.ErrorObjectNotFound {
return fmt.Errorf("Update failed to read object: %w", err)
}
fs.Debugf(o, "Trying to read object after upload: try again in %v (%d/%d)", sleepTime, try, maxTries)
time.Sleep(sleepTime)
sleepTime *= 2
}
if err != nil {
return err
}
o.set(entity)
return nil
}
// Remove this object
func (o *Object) Remove(ctx context.Context) error {
opts := &rest.Opts{
Method: "GET",
RootURL: linkboxAPIURL,
Path: "file_del",
Parameters: url.Values{
"token": {o.fs.opt.Token},
"itemIds": {o.itemID},
},
}
requestResult := getUploadURLResponse{}
err := getUnmarshaledResponse(ctx, o.fs, opts, &requestResult)
if err != nil {
return fmt.Errorf("could not Remove: %w", err)
}
return nil
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// Remote returns the remote path, relative to the fs root
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the object in bytes
func (o *Object) Size() int64 {
return o.size
}
// String returns a description of the Object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns an empty string and hash.ErrUnsupported as Linkbox does not expose hashes
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Name returns the name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Linkbox root '%s'", f.root)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
/*
{
"data": {
"signUrl": "http://xx -- Then CURL PUT your file with sign url "
},
"msg": "please use this url to upload (PUT method)",
"status": 1
}
*/
// All messages have these items
type response struct {
Message string `json:"msg"`
Status int `json:"status"`
}
// IsError returns whether response represents an error
func (r *response) IsError() bool {
return r.Status != 1
}
// Error returns the error state of this response
func (r *response) Error() string {
return fmt.Sprintf("Linkbox error %d: %s", r.Status, r.Message)
}
// responser is interface covering the response so we can use it when it is embedded.
type responser interface {
IsError() bool
Error() string
}
type getUploadURLData struct {
SignURL string `json:"signUrl"`
}
type getUploadURLResponse struct {
response
Data getUploadURLData `json:"data"`
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o := &Object{
fs: f,
remote: src.Remote(),
size: src.Size(),
}
dir, _ := splitDirAndName(src.Remote())
err := f.Mkdir(ctx, dir)
if err != nil {
return nil, err
}
err = o.Update(ctx, in, src, options...)
return o, err
}
// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry determines whether a given err rates being retried
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
f.dirCache.ResetRoot()
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.DirCacheFlusher = &Fs{}
_ fs.Object = &Object{}
)
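
The linkbox Update above drives a two-step protocol: get_upload_url keyed on the MD5 of the first 10 MiB (fileMd5ofPre10m) plus the file size, a PUT to the returned signed URL, then folder_upload_file to register the item. A small sketch of computing that leading-10 MiB MD5 for a local file; the path is illustrative:

```go
package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.Open("example.bin") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = f.Close() }()

	// Hash at most the first 10 MiB, as the Linkbox upload protocol above requires
	h := md5.New()
	if _, err := io.Copy(h, io.LimitReader(f, 10_485_760)); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fileMd5ofPre10m=%x\n", h.Sum(nil))
}
```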


@@ -0,0 +1,17 @@
// Test Linkbox filesystem interface
package linkbox_test
import (
"testing"
"github.com/rclone/rclone/backend/linkbox"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestLinkbox:",
NilObject: (*linkbox.Object)(nil),
})
}


@@ -13,6 +13,7 @@ import (
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"unicode/utf8"
@@ -145,6 +146,11 @@ time we:
- Only checksum the size that stat gave
- Don't update the stat info for the file
**NB** do not use this flag on a Windows Volume Shadow (VSS). For some
unknown reason, files in a VSS sometimes show different sizes from the
directory listing (where the initial stat value comes from on Windows)
and when stat is called on them directly. Other copy tools always use
the direct stat value and setting this flag will disable that.
`,
Default: false,
Advanced: true,
@@ -243,7 +249,7 @@ type Fs struct {
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
xattrSupported int32 // whether xattrs are supported (atomic access)
xattrSupported atomic.Int32 // whether xattrs are supported
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
@@ -291,7 +297,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
lstat: os.Lstat,
}
if xattrSupported {
f.xattrSupported = 1
f.xattrSupported.Store(1)
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
@@ -516,7 +522,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
}
err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
@@ -641,7 +647,13 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return os.Remove(f.localPath(dir))
localPath := f.localPath(dir)
if fi, err := os.Stat(localPath); err != nil {
return err
} else if !fi.IsDir() {
return fs.ErrorIsFile
}
return os.Remove(localPath)
}
// Precision of the file system
@@ -1116,6 +1128,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
}
// Update the file info before we start reading
err = o.lstat()
if err != nil {
return nil, err
}
// If not checking updated then limit to current size. This means if
// file is being extended, readers will read a o.Size() bytes rather
// than the new size making for a consistent upload.
@@ -1280,7 +1298,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Fetch and set metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, src, options)
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
if err != nil {
return fmt.Errorf("failed to read metadata from source object: %w", err)
}
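
Part of the local backend change above swaps manual sync/atomic calls on an int32 field for the typed atomic.Int32 introduced in Go 1.19, so the "xattrs not supported" warning is only ever logged once. A minimal sketch of the typed API on its own:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var xattrSupported atomic.Int32

	xattrSupported.Store(1) // assume supported until proven otherwise

	// The first failure flips the flag exactly once, so the error is logged once
	if xattrSupported.CompareAndSwap(1, 0) {
		fmt.Println("xattrs not supported - disabling")
	}
	fmt.Println("supported:", xattrSupported.Load() == 1)
}
```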


@@ -19,6 +19,7 @@ import (
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
@@ -514,3 +515,43 @@ func TestFilterSymlinkCopyLinks(t *testing.T) {
func TestFilterSymlinkLinks(t *testing.T) {
testFilterSymlink(t, false)
}
func TestCopySymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file and a symlink to it
r.WriteFile("src/file.txt", "hello world", when)
require.NoError(t, os.Symlink("file.txt", filepath.Join(r.LocalName, "src", "link.txt")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
// Set fs into "-l/--links" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
// Create dst
require.NoError(t, f.Mkdir(ctx, "dst"))
// Do copy from src into dst
src, err := f.NewObject(ctx, "src/link.txt.rclonelink")
require.NoError(t, err)
require.NotNil(t, src)
dst, err := operations.Copy(ctx, f, nil, "dst/link.txt.rclonelink", src)
require.NoError(t, err)
require.NotNil(t, dst)
// Test that we made a symlink and it has the right contents
dstPath := filepath.Join(r.LocalName, "dst", "link.txt")
linkContents, err := os.Readlink(dstPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkContents)
}


@@ -6,7 +6,6 @@ package local
import (
"fmt"
"strings"
"sync/atomic"
"syscall"
"github.com/pkg/xattr"
@@ -28,7 +27,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
// Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris)
if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
// Show xattrs not supported
if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
if f.xattrSupported.CompareAndSwap(1, 0) {
fs.Errorf(f, "xattrs not supported - disabling: %v", err)
}
return true
@@ -41,7 +40,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
// It doesn't return any attributes owned by this backend in
// metadataKeys
func (o *Object) getXattr() (metadata fs.Metadata, err error) {
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 {
return nil, nil
}
var list []string
@@ -90,7 +89,7 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
//
// It doesn't set any attributes owned by this backend in metadataKeys
func (o *Object) setXattr(metadata fs.Metadata) (err error) {
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 {
return nil
}
for k, value := range metadata {
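The change above swaps the package-level sync/atomic helpers for the typed atomic.Int32 value introduced in Go 1.19. As a generic sketch (not rclone code) of the pattern being adopted, a "disable the feature and warn exactly once" flag looks like this:

// Generic sketch of the atomic.Int32 "warn once then disable" pattern.
package example

import (
    "log"
    "sync/atomic"
)

type feature struct {
    supported atomic.Int32 // 1 = supported, 0 = disabled
}

func newFeature() *feature {
    f := &feature{}
    f.supported.Store(1)
    return f
}

// disable flips the flag and logs only for the goroutine that wins the swap,
// so concurrent failures still produce a single warning.
func (f *feature) disable(reason error) {
    if f.supported.CompareAndSwap(1, 0) {
        log.Printf("feature not supported - disabling: %v", reason)
    }
}

func (f *feature) enabled() bool { return f.supported.Load() != 0 }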

View File

@@ -85,10 +85,11 @@ func init() {
Name: "mailru",
Description: "Mail.ru Cloud",
NewFs: NewFs,
Options: []fs.Option{{
Name: "user",
Help: "User name (usually email).",
Required: true,
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "user",
Help: "User name (usually email).",
Required: true,
Sensitive: true,
}, {
Name: "pass",
Help: `Password.
@@ -213,7 +214,7 @@ Supported quirks: atomicmkdir binlist unknowndirs`,
encoder.EncodeWin | // :?"*<>|
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}},
}}...),
})
}

View File

@@ -58,9 +58,10 @@ func init() {
Description: "Mega",
NewFs: NewFs,
Options: []fs.Option{{
Name: "user",
Help: "User name.",
Required: true,
Name: "user",
Help: "User name.",
Required: true,
Sensitive: true,
}, {
Name: "pass",
Help: "Password.",
@@ -205,7 +206,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
ci := fs.GetConfig(ctx)
// cache *mega.Mega on username so we can re-use and share
// cache *mega.Mega on username so we can reuse and share
// them between remotes. They are expensive to make as they
// contain all the objects and sharing the objects makes the
// move code easier as we don't have to worry about mixing

View File

@@ -65,11 +65,13 @@ HTTP is provided primarily for debugging purposes.`,
Help: `Domain+path of NetStorage host to connect to.
Format should be ` + "`<domain>/<internal folders>`",
Required: true,
Required: true,
Sensitive: true,
}, {
Name: "account",
Help: "Set the NetStorage account name",
Required: true,
Name: "account",
Help: "Set the NetStorage account name",
Required: true,
Sensitive: true,
}, {
Name: "secret",
Help: `Set the NetStorage account secret/G2O key for authentication.

View File

@@ -99,6 +99,16 @@ type ItemReference struct {
DriveType string `json:"driveType"` // Type of the drive, Read-Only
}
// GetID returns a normalized ID of the item
// If DriveID is known it will be prefixed to the ID with a # separator
// Can be parsed using onedrive.parseNormalizedID(normalizedID)
func (i *ItemReference) GetID() string {
if !strings.Contains(i.ID, "#") {
return i.DriveID + "#" + i.ID
}
return i.ID
}
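The comment above says a normalized ID can be parsed with onedrive.parseNormalizedID. As a hypothetical illustration only (the real helper may differ), splitting the "driveID#itemID" form produced by GetID could look like:

// splitNormalizedID is a hypothetical sketch, not the real parseNormalizedID:
// it splits a normalized "driveID#itemID" value on the "#" separator described
// in the GetID comment above.
func splitNormalizedID(normalizedID string) (driveID, itemID string) {
    if before, after, found := strings.Cut(normalizedID, "#"); found {
        return before, after
    }
    return "", normalizedID
}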
// RemoteItemFacet groups data needed to reference a OneDrive remote item
type RemoteItemFacet struct {
ID string `json:"id"` // The unique identifier of the item within the remote Drive. Read-only.
@@ -185,8 +195,8 @@ type Item struct {
Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
}
// ViewDeltaResponse is the response to the view delta method
type ViewDeltaResponse struct {
// DeltaResponse is the response to the view delta method
type DeltaResponse struct {
Value []Item `json:"value"` // An array of Item objects which have been created, modified, or deleted.
NextLink string `json:"@odata.nextLink"` // A URL to retrieve the next available page of changes.
DeltaLink string `json:"@odata.deltaLink"` // A URL returned instead of @odata.nextLink after all current changes have been returned. Used to read the next set of changes in the future.
@@ -438,3 +448,27 @@ type Version struct {
type VersionsResponse struct {
Versions []Version `json:"value"`
}
// DriveResource is returned from /me/drive
type DriveResource struct {
DriveID string `json:"id"`
DriveName string `json:"name"`
DriveType string `json:"driveType"`
}
// DrivesResponse is returned from "/sites/{siteID}/drives"
type DrivesResponse struct {
Drives []DriveResource `json:"value"`
}
// SiteResource is part of the response from "/sites/root:"
type SiteResource struct {
SiteID string `json:"id"`
SiteName string `json:"displayName"`
SiteURL string `json:"webUrl"`
}
// SiteResponse is returned from "/sites/root:"
type SiteResponse struct {
Sites []SiteResource `json:"value"`
}

View File

@@ -131,10 +131,11 @@ Note that the chunks will be buffered into memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "drive_id",
Help: "The ID of the drive to use.",
Default: "",
Advanced: true,
Name: "drive_id",
Help: "The ID of the drive to use.",
Default: "",
Advanced: true,
Sensitive: true,
}, {
Name: "drive_type",
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
@@ -148,7 +149,8 @@ This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get
there through a path traversal.
`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "access_scopes",
Help: `Set scopes to be requested by rclone.
@@ -260,7 +262,8 @@ this flag there.
At the time of writing this only works with OneDrive personal paid accounts.
`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "hash_type",
Default: "auto",
@@ -321,6 +324,37 @@ the --onedrive-av-override flag, or av_override = true in the config
file.
`,
Advanced: true,
}, {
Name: "delta",
Default: false,
Help: strings.ReplaceAll(`If set rclone will use delta listing to implement recursive listings.
If this flag is set then the onedrive backend will advertise |ListR|
support for recursive listings.
Setting this flag speeds up these things greatly:
rclone lsf -R onedrive:
rclone size onedrive:
rclone rc vfs/refresh recursive=true
**However** the delta listing API **only** works at the root of the
drive. If you use it not at the root then it recurses from the root
and discards all the data that is not under the directory you asked
for. So it will be correct but may not be very efficient.
This is why this flag is not set as the default.
As a rule of thumb if nearly all of your data is under rclone's root
directory (the |root/directory| in |onedrive:root/directory|) then
using this flag will be a big performance win. If your data is
mostly not under the root then using this flag will be a big
performance loss.
It is recommended if you are mounting your onedrive at the root
(or near the root when using crypt) and using rclone |rc vfs/refresh|.
`, "|", "`"),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -368,28 +402,6 @@ file.
})
}
type driveResource struct {
DriveID string `json:"id"`
DriveName string `json:"name"`
DriveType string `json:"driveType"`
}
type drivesResponse struct {
Drives []driveResource `json:"value"`
}
type siteResource struct {
SiteID string `json:"id"`
SiteName string `json:"displayName"`
SiteURL string `json:"webUrl"`
}
type siteResponse struct {
Sites []siteResource `json:"value"`
}
type deltaResponse struct {
DeltaLink string `json:"@odata.deltaLink"`
Value []api.Item `json:"value"`
}
// Get the region and graphURL from the config
func getRegionURL(m configmap.Mapper) (region, graphURL string) {
region, _ = m.Get("region")
@@ -416,7 +428,7 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
RootURL: graphURL,
Path: "/sites/root:" + opt.relativePath,
}
site := siteResource{}
site := api.SiteResource{}
_, err := srv.CallJSON(ctx, &opt.opts, nil, &site)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available site by relative path: %v", err))
@@ -433,7 +445,7 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
}
}
drives := drivesResponse{}
drives := api.DrivesResponse{}
// We don't have the final ID yet?
// query Microsoft Graph
@@ -446,7 +458,7 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opt.opts.Path == "/me/drives" {
opt.opts.Path = "/me/drive"
meDrive := driveResource{}
meDrive := api.DriveResource{}
_, err := srv.CallJSON(ctx, &opt.opts, nil, &meDrive)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
@@ -465,7 +477,7 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
}
}
} else {
drives.Drives = append(drives.Drives, driveResource{
drives.Drives = append(drives.Drives, api.DriveResource{
DriveID: opt.finalDriveID,
DriveName: "Chosen Drive ID",
DriveType: "drive",
@@ -569,15 +581,18 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
case "url":
return fs.ConfigInput("url_end", "config_site_url", `Site URL
Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
Examples:
- "mysite"
- "https://XXX.sharepoint.com/sites/mysite"
- "https://XXX.sharepoint.com/teams/ID"
`)
case "url_end":
siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint\.com/sites/(.*)`)
re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: "/sites/" + match[1],
relativePath: match[1],
})
}
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
@@ -599,7 +614,7 @@ Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
Path: "/sites?search=" + searchTerm,
}
sites := siteResponse{}
sites := api.SiteResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &sites)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available sites: %v", err))
@@ -661,6 +676,7 @@ type Options struct {
LinkPassword string `config:"link_password"`
HashType string `config:"hash_type"`
AVOverride bool `config:"av_override"`
Delta bool `config:"delta"`
Enc encoder.MultiEncoder `config:"encoding"`
}
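As a usage illustration of the new delta option (the remote name here is made up), enabling it is a one line change in the config file; the equivalent command line flag would normally be spelled --onedrive-delta:

[onedrive]
type = onedrive
delta = true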
@@ -992,6 +1008,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.dirCache = dircache.New(root, rootID, f)
// ListR only supported if delta set
if !f.opt.Delta {
f.features.ListR = nil
}
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
@@ -1111,32 +1132,29 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*api.Item) bool
// If an error is returned then processing stops
type listAllFn func(*api.Item) error
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := f.newOptsCall(dirID, "GET", fmt.Sprintf("/children?$top=%d", f.opt.ListChunk))
OUTER:
//
// This listing function works on both normal listings and delta listings
func (f *Fs) _listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn, opts *rest.Opts, result any, pValue *[]api.Item, pNextLink *string) (err error) {
for {
var result api.ListChildrenResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(ctx, opts, nil, result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return found, fmt.Errorf("couldn't list files: %w", err)
return fmt.Errorf("couldn't list files: %w", err)
}
if len(result.Value) == 0 {
if len(*pValue) == 0 {
break
}
for i := range result.Value {
item := &result.Value[i]
for i := range *pValue {
item := &(*pValue)[i]
isFolder := item.GetFolder() != nil
if isFolder {
if filesOnly {
@@ -1151,18 +1169,60 @@ OUTER:
continue
}
item.Name = f.opt.Enc.ToStandardName(item.GetName())
if fn(item) {
found = true
break OUTER
err = fn(item)
if err != nil {
return err
}
}
if result.NextLink == "" {
if *pNextLink == "" {
break
}
opts.Path = ""
opts.RootURL = result.NextLink
opts.Parameters = nil
opts.RootURL = *pNextLink
// reset results
*pNextLink = ""
*pValue = nil
}
return
return nil
}
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns an error then the listing stops and the error is returned
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (err error) {
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := f.newOptsCall(dirID, "GET", fmt.Sprintf("/children?$top=%d", f.opt.ListChunk))
var result api.ListChildrenResponse
return f._listAll(ctx, dirID, directoriesOnly, filesOnly, fn, &opts, &result, &result.Value, &result.NextLink)
}
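The refactor above lets a single paging loop serve both the /children and /delta endpoints by handing _listAll pointers into the caller-owned response struct. A rough standalone sketch of that shape (hypothetical names, error handling trimmed):

// pageItems is a sketch of the shared-pager pattern: the caller owns the typed
// response struct and passes pointers to its item slice and next-link field.
func pageItems(fetch func(url string, out any) error, result any,
    pValue *[]api.Item, pNextLink *string, fn func(*api.Item) error) error {
    url := "" // empty means the first page of the listing
    for {
        if err := fetch(url, result); err != nil {
            return err
        }
        for i := range *pValue {
            if err := fn(&(*pValue)[i]); err != nil {
                return err
            }
        }
        if *pNextLink == "" {
            return nil
        }
        // follow the continuation link and reset the shared result fields
        url = *pNextLink
        *pNextLink = ""
        *pValue = nil
    }
}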
// Convert a list item into a DirEntry
//
// Can return nil for an item which should be skipped
func (f *Fs) itemToDirEntry(ctx context.Context, dir string, info *api.Item) (entry fs.DirEntry, err error) {
if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
fs.Debugf(info.Name, "OneNote file not shown in directory listing")
return nil, nil
}
remote := path.Join(dir, info.GetName())
folder := info.GetFolder()
if folder != nil {
// cache the directory ID for later lookups
id := info.GetID()
f.dirCache.Put(remote, id)
d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
d.SetItems(folder.ChildCount)
entry = d
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
return nil, err
}
entry = o
}
return entry, nil
}
// List the objects and directories in dir into entries. The
@@ -1179,41 +1239,137 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil {
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
fs.Debugf(info.Name, "OneNote file not shown in directory listing")
return false
err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) error {
entry, err := f.itemToDirEntry(ctx, dir, info)
if err == nil {
entries = append(entries, entry)
}
remote := path.Join(dir, info.GetName())
folder := info.GetFolder()
if folder != nil {
// cache the directory ID for later lookups
id := info.GetID()
f.dirCache.Put(remote, id)
d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
d.SetItems(folder.ChildCount)
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
}
entries = append(entries, o)
}
return false
return err
})
if err != nil {
return nil, err
}
if iErr != nil {
return nil, iErr
}
return entries, nil
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
// Make sure this ID is in the directory cache
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
// ListR only works at the root of a onedrive, not on a folder
// So we have to filter things outside of the root which is
// inefficient.
list := walk.NewListRHelper(callback)
// list a folder conventionally - used for shared folders
var listFolder func(dir string) error
listFolder = func(dir string) error {
entries, err := f.List(ctx, dir)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
if _, isDir := entry.(fs.Directory); isDir {
err = listFolder(entry.Remote())
if err != nil {
return err
}
}
}
return nil
}
// This code relies on the fact that directories are sent before their children. This isn't
// mentioned in the docs though, so maybe it shouldn't be relied on.
seen := map[string]struct{}{}
fn := func(info *api.Item) error {
var parentPath string
var ok bool
id := info.GetID()
// The API can produce duplicates, so skip them
if _, found := seen[id]; found {
return nil
}
seen[id] = struct{}{}
// Skip the root directory
if id == directoryID {
return nil
}
// Skip deleted items
if info.Deleted != nil {
return nil
}
dirID := info.GetParentReference().GetID()
// Skip files that don't have their parent directory
// cached as they are outside the root.
parentPath, ok = f.dirCache.GetInv(dirID)
if !ok {
return nil
}
// Skip files not under the root directory
remote := path.Join(parentPath, info.GetName())
if dir != "" && !strings.HasPrefix(remote, dir+"/") {
return nil
}
entry, err := f.itemToDirEntry(ctx, parentPath, info)
if err != nil {
return err
}
err = list.Add(entry)
if err != nil {
return err
}
// If this is a shared folder, we'll need to list it too
if info.RemoteItem != nil && info.RemoteItem.Folder != nil {
fs.Debugf(remote, "Listing shared directory")
return listFolder(remote)
}
return nil
}
opts := rest.Opts{
Method: "GET",
Path: "/root/delta",
Parameters: map[string][]string{
// "token": {token},
"$top": {fmt.Sprintf("%d", f.opt.ListChunk)},
},
}
var result api.DeltaResponse
err = f._listAll(ctx, "", false, false, fn, &opts, &result, &result.Value, &result.NextLink)
if err != nil {
return err
}
return list.Flush()
}
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
@@ -1282,15 +1438,12 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}
if check {
// check to see if there are any items
found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
return true
err := f.listAll(ctx, rootID, false, false, func(item *api.Item) error {
return fs.ErrorDirectoryNotEmpty
})
if err != nil {
return err
}
if found {
return fs.ErrorDirectoryNotEmpty
}
}
err = f.deleteObject(ctx, rootID)
if err != nil {
@@ -2475,7 +2628,7 @@ func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (nextDeltaToken str
return
}
func (f *Fs) changeNotifyNextChange(ctx context.Context, token string) (delta deltaResponse, err error) {
func (f *Fs) changeNotifyNextChange(ctx context.Context, token string) (delta api.DeltaResponse, err error) {
opts := f.buildDriveDeltaOpts(token)
_, err = f.srv.CallJSON(ctx, &opts, nil, &delta)
@@ -2594,6 +2747,7 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}

View File

@@ -42,9 +42,10 @@ func init() {
Description: "OpenDrive",
NewFs: NewFs,
Options: []fs.Option{{
Name: "username",
Help: "Username.",
Required: true,
Name: "username",
Help: "Username.",
Required: true,
Sensitive: true,
}, {
Name: "password",
Help: "Password.",
@@ -766,6 +767,17 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
if apiError, ok := err.(*Error); ok {
// Work around a bug maybe in opendrive or maybe in rclone.
//
// We should know whether the folder exists or not by the call to
// FindDir above so exactly why it is not found here is a mystery.
//
// This manifests as a failure in fs/sync TestSyncOverlapWithFilter
if apiError.Info.Message == "Folder is already deleted" {
return fs.DirEntries{}, nil
}
}
return nil, fmt.Errorf("failed to get folder list: %w", err)
}

View File

@@ -13,7 +13,6 @@ import (
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
)
const (
@@ -128,18 +127,3 @@ func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}

View File

@@ -6,6 +6,7 @@ package oracleobjectstorage
import (
"context"
"fmt"
"sort"
"strings"
"time"
@@ -196,6 +197,32 @@ func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) {
return f.listMultipartUploadsObject(ctx, bucketName, directory, false)
}
// findLatestMultipartUpload finds the most recent outstanding multipart upload
// for (bucket, key)
//
// Unlike listMultipartUploads it matches the key exactly rather than treating
// it as a prefix.
func (f *Fs) findLatestMultipartUpload(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) {
pastUploads, err := f.listMultipartUploadsObject(ctx, bucketName, directory, true)
if err != nil {
return nil, err
}
if len(pastUploads) > 0 {
sort.Slice(pastUploads, func(i, j int) bool {
return pastUploads[i].TimeCreated.After(pastUploads[j].TimeCreated.Time)
})
return pastUploads[:1], nil
}
return nil, err
}
func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directory string, exact bool) (
uploads []*objectstorage.MultipartUpload, err error) {
uploads = []*objectstorage.MultipartUpload{}
req := objectstorage.ListMultipartUploadsRequest{
@@ -217,7 +244,13 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory str
if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
continue
}
uploads = append(uploads, &response.Items[index])
if exact {
if *item.Object == directory {
uploads = append(uploads, &response.Items[index])
}
} else {
uploads = append(uploads, &response.Items[index])
}
}
if response.OpcNextPage == nil {
break
@@ -226,3 +259,34 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory str
}
return uploads, nil
}
func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPath string, uploadID string) (
uploadedParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
uploadedParts = make(map[int]objectstorage.MultipartUploadPartSummary)
req := objectstorage.ListMultipartUploadPartsRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
UploadId: common.String(uploadID),
Limit: common.Int(1000),
}
var response objectstorage.ListMultipartUploadPartsResponse
for {
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.ListMultipartUploadParts(ctx, req)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
return uploadedParts, err
}
for _, item := range response.Items {
uploadedParts[*item.PartNumber] = item
}
if response.OpcNextPage == nil {
break
}
req.Page = response.OpcNextPage
}
return uploadedParts, nil
}

View File

@@ -0,0 +1,441 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"strings"
"sync"
"time"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pool"
"golang.org/x/net/http/httpguts"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
)
var warnStreamUpload sync.Once
// Info needed for an upload
type uploadInfo struct {
req *objectstorage.PutObjectRequest
md5sumHex string
}
type objectChunkWriter struct {
chunkSize int64
size int64
f *Fs
bucket *string
key *string
uploadID *string
partsToCommit []objectstorage.CommitMultipartUploadPartDetails
partsToCommitMu sync.Mutex
existingParts map[int]objectstorage.MultipartUploadPartSummary
eTag string
md5sMu sync.Mutex
md5s []byte
ui uploadInfo
o *Object
}
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
_, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
return err
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(
ctx context.Context,
remote string,
src fs.ObjectInfo,
options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
ui, err := o.prepareUpload(ctx, src, options)
if err != nil {
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
}
uploadParts := f.opt.MaxUploadParts
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
size := src.Size()
// calculate size of parts
chunkSize := f.opt.ChunkSize
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
// 48 GiB which does not seem like an unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
})
} else {
chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
}
uploadID, existingParts, err := o.createMultipartUpload(ctx, ui.req)
if err != nil {
return info, nil, fmt.Errorf("create multipart upload request failed: %w", err)
}
bucketName, bucketPath := o.split()
chunkWriter := &objectChunkWriter{
chunkSize: int64(chunkSize),
size: size,
f: f,
bucket: &bucketName,
key: &bucketPath,
uploadID: &uploadID,
existingParts: existingParts,
ui: ui,
o: o,
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(chunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
fs.Debugf(o, "open chunk writer: started multipart upload: %v", uploadID)
return info, chunkWriter, err
}
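The streaming comment in OpenChunkWriter above caps unknown-size uploads at roughly chunk size times the part limit. A rough worked example of the arithmetic (not rclone's chunksize.Calculator, which also rounds the result up to convenient boundaries):

// minChunkSizeFor returns the smallest chunk size that keeps a file of the
// given size within maxParts parts.
func minChunkSizeFor(fileSize, maxParts, defaultChunk int64) int64 {
    need := (fileSize + maxParts - 1) / maxParts // ceil(fileSize / maxParts)
    if need > defaultChunk {
        return need
    }
    return defaultChunk
}

// minChunkSizeFor(100<<30, 10000, 5<<20) returns 10737419 bytes (about 10.24 MiB)
// for a known 100 GiB file, while a stream of unknown size stays at 5 MiB chunks
// and so tops out near 5 MiB * 10,000 = 48.8 GiB, the figure in the warning above.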
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
if chunkNumber < 0 {
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
return -1, err
}
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(2)
}
m := md5.New()
currentChunkSize, err := io.Copy(m, reader)
if err != nil {
return -1, err
}
// If no data read, don't write the chunk
if currentChunkSize == 0 {
return 0, nil
}
md5sumBinary := m.Sum([]byte{})
w.addMd5(&md5sumBinary, int64(chunkNumber))
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
// Object storage requires 1 <= PartNumber <= 10000
ossPartNumber := chunkNumber + 1
if existing, ok := w.existingParts[ossPartNumber]; ok {
if md5sum == *existing.Md5 {
fs.Debugf(w.o, "matched uploaded part found, part num %d, skipping part, md5=%v", *existing.PartNumber, md5sum)
w.addCompletedPart(existing.PartNumber, existing.Etag)
return currentChunkSize, nil
}
}
req := objectstorage.UploadPartRequest{
NamespaceName: common.String(w.f.opt.Namespace),
BucketName: w.bucket,
ObjectName: w.key,
UploadId: w.uploadID,
UploadPartNum: common.Int(ossPartNumber),
ContentLength: common.Int64(currentChunkSize),
ContentMD5: common.String(md5sum),
}
w.o.applyPartUploadOptions(w.ui.req, &req)
var resp objectstorage.UploadPartResponse
err = w.f.pacer.Call(func() (bool, error) {
// req.UploadPartBody = io.NopCloser(bytes.NewReader(buf))
// rewind the reader on retry and after reading md5
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
req.UploadPartBody = io.NopCloser(reader)
resp, err = w.f.srv.UploadPart(ctx, req)
if err != nil {
if ossPartNumber <= 8 {
return shouldRetry(ctx, resp.HTTPResponse(), err)
}
// retry all chunks once we have done the first few
return true, err
}
return false, err
})
if err != nil {
fs.Errorf(w.o, "multipart upload failed to upload part:%d err: %v", ossPartNumber, err)
return -1, fmt.Errorf("multipart upload failed to upload part: %w", err)
}
w.addCompletedPart(&ossPartNumber, resp.ETag)
return currentChunkSize, err
}
// add a part number and etag to the completed parts
func (w *objectChunkWriter) addCompletedPart(partNum *int, eTag *string) {
w.partsToCommitMu.Lock()
defer w.partsToCommitMu.Unlock()
w.partsToCommit = append(w.partsToCommit, objectstorage.CommitMultipartUploadPartDetails{
PartNum: partNum,
Etag: eTag,
})
}
func (w *objectChunkWriter) Close(ctx context.Context) (err error) {
req := objectstorage.CommitMultipartUploadRequest{
NamespaceName: common.String(w.f.opt.Namespace),
BucketName: w.bucket,
ObjectName: w.key,
UploadId: w.uploadID,
}
req.PartsToCommit = w.partsToCommit
var resp objectstorage.CommitMultipartUploadResponse
err = w.f.pacer.Call(func() (bool, error) {
resp, err = w.f.srv.CommitMultipartUpload(ctx, req)
// if multipart is corrupted, we will abort the uploadId
if isMultiPartUploadCorrupted(err) {
fs.Debugf(w.o, "multipart uploadId %v is corrupted, aborting...", *w.uploadID)
_ = w.Abort(ctx)
return false, err
}
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return err
}
w.eTag = *resp.ETag
hashOfHashes := md5.Sum(w.md5s)
wantMultipartMd5 := fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hashOfHashes[:]), len(w.partsToCommit))
gotMultipartMd5 := *resp.OpcMultipartMd5
if wantMultipartMd5 != gotMultipartMd5 {
fs.Errorf(w.o, "multipart upload corrupted: multipart md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
return fmt.Errorf("multipart upload corrupted: md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
}
fs.Debugf(w.o, "multipart upload %v md5 matched: expecting %s and got %s", *w.uploadID, wantMultipartMd5, gotMultipartMd5)
return nil
}
func isMultiPartUploadCorrupted(err error) bool {
if err == nil {
return false
}
// Check if this is an OCI service error, and if it is a multipart commit error
if ociError, ok := err.(common.ServiceError); ok {
// InvalidUploadPart means the multipart commit is corrupted, so the caller aborts the upload
if ociError.GetCode() == "InvalidUploadPart" {
return true
}
}
return false
}
func (w *objectChunkWriter) Abort(ctx context.Context) error {
fs.Debugf(w.o, "Cancelling multipart upload")
err := w.o.fs.abortMultiPartUpload(
ctx,
w.bucket,
w.key,
w.uploadID)
if err != nil {
fs.Debugf(w.o, "Failed to cancel multipart upload: %v", err)
} else {
fs.Debugf(w.o, "canceled and aborted multipart upload: %v", *w.uploadID)
}
return err
}
// addMd5 adds a binary md5 to the md5 calculated so far
func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
w.md5sMu.Lock()
defer w.md5sMu.Unlock()
start := chunkNumber * md5.Size
end := start + md5.Size
if extend := end - int64(len(w.md5s)); extend > 0 {
w.md5s = append(w.md5s, make([]byte, extend)...)
}
copy(w.md5s[start:end], (*md5binary)[:])
}
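Close above rebuilds the multipart checksum from the per-part MD5s accumulated by addMd5 and compares it with the OpcMultipartMd5 returned by the server. A minimal sketch of that calculation, reusing the crypto/md5, encoding/base64 and fmt imports already present in this file:

// expectedMultipartMD5 mirrors the check in Close: MD5 the concatenated binary
// per-part MD5s, base64 encode the result, and append "-<number of parts>".
func expectedMultipartMD5(concatenatedPartMD5s []byte, parts int) string {
    hashOfHashes := md5.Sum(concatenatedPartMD5s)
    return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hashOfHashes[:]), parts)
}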
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
bucket, bucketPath := o.split()
ui.req = &objectstorage.PutObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucket),
ObjectName: common.String(bucketPath),
}
// Set the mtime in the metadata
modTime := src.ModTime(ctx)
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
if err != nil {
return ui, fmt.Errorf("failed to read metadata from source object: %w", err)
}
ui.req.OpcMeta = make(map[string]string, len(meta)+2)
// merge metadata into request and user metadata
for k, v := range meta {
pv := common.String(v)
k = strings.ToLower(k)
switch k {
case "cache-control":
ui.req.CacheControl = pv
case "content-disposition":
ui.req.ContentDisposition = pv
case "content-encoding":
ui.req.ContentEncoding = pv
case "content-language":
ui.req.ContentLanguage = pv
case "content-type":
ui.req.ContentType = pv
case "tier":
// ignore
case "mtime":
// mtime in meta overrides source ModTime
metaModTime, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
} else {
modTime = metaModTime
}
case "btime":
// write as metadata since we can't set it
ui.req.OpcMeta[k] = v
default:
ui.req.OpcMeta[k] = v
}
}
// Set the mtime in the metadata
ui.req.OpcMeta[metaMtime] = swift.TimeToFloatString(modTime)
// read the md5sum if available
// - for non-multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart, provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
size := src.Size()
isMultipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
var md5sumBase64 string
if !isMultipart || !o.fs.opt.DisableChecksum {
ui.md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(ui.md5sumHex) {
hashBytes, err := hex.DecodeString(ui.md5sumHex)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
if isMultipart && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
ui.req.OpcMeta[metaMD5Hash] = md5sumBase64
}
}
}
}
// Set the content type if it isn't set already
if ui.req.ContentType == nil {
ui.req.ContentType = common.String(fs.MimeType(ctx, src))
}
if size >= 0 {
ui.req.ContentLength = common.Int64(size)
}
if md5sumBase64 != "" {
ui.req.ContentMD5 = &md5sumBase64
}
o.applyPutOptions(ui.req, options...)
useBYOKPutObject(o.fs, ui.req)
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return ui, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
ui.req.StorageTier = storageTier
}
// Check metadata keys and values are valid
for key, value := range ui.req.OpcMeta {
if !httpguts.ValidHeaderFieldName(key) {
fs.Errorf(o, "Dropping invalid metadata key %q", key)
delete(ui.req.OpcMeta, key)
} else if value == "" {
fs.Errorf(o, "Dropping nil metadata value for key %q", key)
delete(ui.req.OpcMeta, key)
} else if !httpguts.ValidHeaderFieldValue(value) {
fs.Errorf(o, "Dropping invalid metadata value %q for key %q", value, key)
delete(ui.req.OpcMeta, key)
}
}
return ui, nil
}
func (o *Object) createMultipartUpload(ctx context.Context, putReq *objectstorage.PutObjectRequest) (
uploadID string, existingParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
bucketName, bucketPath := o.split()
f := o.fs
if f.opt.AttemptResumeUpload {
fs.Debugf(o, "attempting to resume upload for %v (if any)", o.remote)
resumeUploads, err := o.fs.findLatestMultipartUpload(ctx, bucketName, bucketPath)
if err == nil && len(resumeUploads) > 0 {
uploadID = *resumeUploads[0].UploadId
existingParts, err = f.listMultipartUploadParts(ctx, bucketName, bucketPath, uploadID)
if err == nil {
fs.Debugf(o, "resuming with existing upload id: %v", uploadID)
return uploadID, existingParts, err
}
}
}
req := objectstorage.CreateMultipartUploadRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
}
req.Object = common.String(bucketPath)
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return "", nil, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
req.StorageTier = storageTier
}
o.applyMultipartUploadOptions(putReq, &req)
var resp objectstorage.CreateMultipartUploadResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CreateMultipartUpload(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return "", existingParts, err
}
existingParts = make(map[int]objectstorage.MultipartUploadPartSummary)
uploadID = *resp.UploadId
fs.Debugf(o, "created new upload id: %v", uploadID)
return uploadID, existingParts, err
}

View File

@@ -4,12 +4,14 @@
package oracleobjectstorage
import (
"bytes"
"context"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"net/http"
"os"
"regexp"
"strconv"
"strings"
@@ -18,10 +20,8 @@ import (
"github.com/ncw/swift/v2"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
)
// ------------------------------------------------------------
@@ -367,9 +367,28 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
return resp.HTTPResponse().Body, nil
}
func isZeroLength(streamReader io.Reader) bool {
switch v := streamReader.(type) {
case *bytes.Buffer:
return v.Len() == 0
case *bytes.Reader:
return v.Len() == 0
case *strings.Reader:
return v.Len() == 0
case *os.File:
fi, err := v.Stat()
if err != nil {
return false
}
return fi.Size() == 0
default:
return false
}
}
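This special case matters because of how the chunk writer behaves: WriteChunk earlier in this change skips chunks that read zero bytes, so a zero-length stream pushed through the multipart path would end up trying to commit an upload with no parts. Detecting the empty reader here and forcing it down the single PutObject path avoids that.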
// Update an object if it has changed
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucketName, bucketPath := o.split()
bucketName, _ := o.split()
err = o.fs.makeBucket(ctx, bucketName)
if err != nil {
return err
@@ -377,142 +396,24 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// determine whether to upload single part or multipart.
size := src.Size()
multipart := size >= int64(o.fs.opt.UploadCutoff)
// Set the mtime in the metadata
modTime := src.ModTime(ctx)
metadata := map[string]string{
metaMtime: swift.TimeToFloatString(modTime),
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if isZeroLength(in) {
multipart = false
}
// read the md5sum if available
// - for non-multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
var md5sumBase64 string
var md5sumHex string
if !multipart || !o.fs.opt.DisableChecksum {
md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(md5sumHex) {
hashBytes, err := hex.DecodeString(md5sumHex)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
if multipart && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
metadata[metaMD5Hash] = md5sumBase64
}
}
}
}
// Guess the content type
mimeType := fs.MimeType(ctx, src)
if multipart {
chunkSize := int64(o.fs.opt.ChunkSize)
uploadRequest := transfer.UploadRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PartSize: common.Int64(chunkSize),
AllowMultipartUploads: common.Bool(true),
AllowParrallelUploads: common.Bool(true),
ObjectStorageClient: o.fs.srv,
EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
NumberOfGoroutines: common.Int(o.fs.opt.UploadConcurrency),
Metadata: metadataWithOpcPrefix(metadata),
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
uploadRequest.StorageTier = storageTier
}
o.applyMultiPutOptions(&uploadRequest, options...)
useBYOKUpload(o.fs, &uploadRequest)
uploadStreamRequest := transfer.UploadStreamRequest{
UploadRequest: uploadRequest,
StreamReader: in,
}
uploadMgr := transfer.NewUploadManager()
var uploadID = ""
defer atexit.OnError(&err, func() {
if uploadID == "" {
return
}
if o.fs.opt.LeavePartsOnError {
return
}
fs.Debugf(o, "Cancelling multipart upload")
errCancel := o.fs.abortMultiPartUpload(
context.Background(),
bucketName,
bucketPath,
uploadID)
if errCancel != nil {
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
}
})()
err = o.fs.pacer.Call(func() (bool, error) {
uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
var httpResponse *http.Response
if err == nil {
if uploadResponse.Type == transfer.MultipartUpload {
if uploadResponse.MultipartUploadResponse != nil {
httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
}
} else {
if uploadResponse.SinglepartUploadResponse != nil {
httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
}
}
}
if err != nil {
uploadID := ""
if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
uploadID = *uploadResponse.MultipartUploadResponse.UploadID
fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
_ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
}
}
return shouldRetry(ctx, httpResponse, err)
})
err = o.uploadMultipart(ctx, src, in, options...)
if err != nil {
fs.Errorf(o, "multipart streaming upload failed %v", err)
return err
}
} else {
req := objectstorage.PutObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PutObjectBody: io.NopCloser(in),
OpcMeta: metadata,
ui, err := o.prepareUpload(ctx, src, options)
if err != nil {
return fmt.Errorf("failed to prepare upload: %w", err)
}
if size >= 0 {
req.ContentLength = common.Int64(size)
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
req.StorageTier = storageTier
}
o.applyPutOptions(&req, options...)
useBYOKPutObject(o.fs, &req)
var resp objectstorage.PutObjectResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.PutObject(ctx, req)
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
ui.req.PutObjectBody = io.NopCloser(in)
resp, err = o.fs.srv.PutObject(ctx, *ui.req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
@@ -591,28 +492,24 @@ func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, opti
}
}
func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "content-encoding":
req.ContentEncoding = common.String(value)
case "content-language":
req.ContentLanguage = common.String(value)
case "content-type":
req.ContentType = common.String(value)
default:
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
req.Metadata[lowerKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
func (o *Object) applyMultipartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.CreateMultipartUploadRequest) {
req.ContentType = putReq.ContentType
req.ContentLanguage = putReq.ContentLanguage
req.ContentEncoding = putReq.ContentEncoding
req.ContentDisposition = putReq.ContentDisposition
req.CacheControl = putReq.CacheControl
req.Metadata = metadataWithOpcPrefix(putReq.OpcMeta)
req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm
req.OpcSseCustomerKey = putReq.OpcSseCustomerKey
req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256
req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId
}
func (o *Object) applyPartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.UploadPartRequest) {
req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm
req.OpcSseCustomerKey = putReq.OpcSseCustomerKey
req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256
req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId
}
func metadataWithOpcPrefix(src map[string]string) map[string]string {

View File

@@ -13,9 +13,10 @@ import (
const (
maxSizeForCopy = 4768 * 1024 * 1024
minChunkSize = fs.SizeSuffix(1024 * 1024 * 5)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadParts = 10000
defaultUploadConcurrency = 10
minChunkSize = fs.SizeSuffix(5 * 1024 * 1024)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
minSleep = 10 * time.Millisecond
defaultCopyTimeoutDuration = fs.Duration(time.Minute)
@@ -55,12 +56,14 @@ type Options struct {
ConfigProfile string `config:"config_profile"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxUploadParts int `config:"max_upload_parts"`
UploadConcurrency int `config:"upload_concurrency"`
DisableChecksum bool `config:"disable_checksum"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
CopyTimeout fs.Duration `config:"copy_timeout"`
StorageTier string `config:"storage_tier"`
LeavePartsOnError bool `config:"leave_parts_on_error"`
AttemptResumeUpload bool `config:"attempt_resume_upload"`
NoCheckBucket bool `config:"no_check_bucket"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
@@ -92,14 +95,16 @@ func newOptions() []fs.Option {
Help: noAuthHelpText,
}},
}, {
Name: "namespace",
Help: "Object storage namespace",
Required: true,
Name: "namespace",
Help: "Object storage namespace",
Required: true,
Sensitive: true,
}, {
Name: "compartment",
Help: "Object storage compartment OCID",
Provider: "!no_auth",
Required: true,
Name: "compartment",
Help: "Object storage compartment OCID",
Provider: "!no_auth",
Required: true,
Sensitive: true,
}, {
Name: "region",
Help: "Object storage Region",
@@ -155,9 +160,8 @@ The minimum is 0 and the maximum is 5 GiB.`,
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff or files with unknown
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded
as multipart uploads using this chunk size.
Note that "upload_concurrency" chunks of this size are buffered
in memory per transfer.
@@ -179,6 +183,20 @@ statistics displayed with "-P" flag.
`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "max_upload_parts",
Help: `Maximum number of parts in a multipart upload.
This option defines the maximum number of multipart chunks to use
when doing a multipart upload.
OCI has a maximum limit of 10,000 parts per multipart upload.
Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below this limit.
`,
Default: maxUploadParts,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
@@ -236,12 +254,24 @@ to start uploading.`,
encoder.EncodeDot,
}, {
Name: "leave_parts_on_error",
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.
It should be set to true for resuming uploads across different sessions.
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
additional costs if not cleaned up.
`,
Default: false,
Advanced: true,
}, {
Name: "attempt_resume_upload",
Help: `If true attempt to resume previously started multipart upload for the object.
This can speed up multipart transfers by resuming uploads from a past session.
WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is
aborted and a new multipart upload is started with the new chunk size.
The flag leave_parts_on_error must be true for resuming to work; it also allows parts that were already uploaded successfully to be skipped.
`,
Default: false,
Advanced: true,

View File

@@ -138,6 +138,14 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}
func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
}
return
}
// ------------------------------------------------------------
// Implement backend that represents a remote object storage server
// Fs is the interface a cloud storage system must provide
@@ -318,7 +326,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
remote := *object.Name
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
// fs.Debugf(f, "Odd name received %v", object.Name)
continue
}
remote = remote[len(prefix):]
@@ -558,15 +565,15 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
})
}
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
if uploadID == "" {
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID *string) (err error) {
if uploadID == nil || *uploadID == "" {
return nil
}
request := objectstorage.AbortMultipartUploadRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
UploadId: common.String(uploadID),
BucketName: bucketName,
ObjectName: bucketPath,
UploadId: uploadID,
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.AbortMultipartUpload(ctx, request)
@@ -589,7 +596,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
if operations.SkipDestructive(ctx, what, "remove pending upload") {
continue
}
_ = f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
_ = f.abortMultiPartUpload(ctx, upload.Bucket, upload.Object, upload.UploadId)
}
} else {
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
@@ -684,12 +691,13 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}

View File

@@ -30,4 +30,12 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)

View File

@@ -110,10 +110,11 @@ func init() {
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}, {
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "d0",
Advanced: true,
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "d0",
Advanced: true,
Sensitive: true,
}, {
Name: "hostname",
Help: `Hostname to connect to.
@@ -138,7 +139,8 @@ with rclone authorize.
This is only required when you want to use the cleanup command. Due to a bug
in the pcloud API the required API does not support OAuth authentication so
we have to rely on user password authentication for it.`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "password",
Help: "Your pcloud password.",

View File

@@ -158,9 +158,10 @@ func init() {
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(pikpakOAuthOptions(), []fs.Option{{
Name: "user",
Help: "Pikpak username.",
Required: true,
Name: "user",
Help: "Pikpak username.",
Required: true,
Sensitive: true,
}, {
Name: "pass",
Help: "Pikpak password.",
@@ -173,7 +174,8 @@ Leave blank normally.
Fill in for rclone to use a non root folder as its starting point.
`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "use_trash",
Default: true,
@@ -1213,7 +1215,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, sha1Str stri
return nil, fmt.Errorf("failed to upload: %w", err)
}
// refresh uploaded file info
// Compared to `newfile.File` this upgrades several feilds...
// Compared to `newfile.File` this upgrades several fields...
// audit, links, modified_time, phase, revision, and web_content_link
return f.getFile(ctx, newfile.File.ID)
}

View File

@@ -82,14 +82,15 @@ func init() {
OAuth2Config: oauthConfig,
})
},
Options: []fs.Option{{
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "api_key",
Help: `API Key.
This is not normally used - use oauth instead.
`,
Hide: fs.OptionHideBoth,
Default: "",
Hide: fs.OptionHideBoth,
Default: "",
Sensitive: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -99,7 +100,7 @@ This is not normally used - use oauth instead.
encoder.EncodeBackSlash |
encoder.EncodeDoubleQuote |
encoder.EncodeInvalidUtf8),
}},
}}...),
})
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,16 @@
package protondrive_test
import (
"testing"
"github.com/rclone/rclone/backend/protondrive"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestProtonDrive:",
NilObject: (*protondrive.Object)(nil),
})
}

View File

@@ -67,7 +67,7 @@ func init() {
NoOffline: true,
})
},
Options: []fs.Option{{
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
@@ -77,7 +77,7 @@ func init() {
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}},
}}...),
})
}

View File

@@ -49,11 +49,13 @@ func init() {
Help: "Get QingStor credentials from the environment (env vars or IAM).",
}},
}, {
Name: "access_key_id",
Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
Name: "access_key_id",
Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
Sensitive: true,
}, {
Name: "secret_access_key",
Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
Name: "secret_access_key",
Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
Sensitive: true,
}, {
Name: "endpoint",
Help: "Enter an endpoint URL to connection QingStor API.\n\nLeave blank will use the default value \"https://qingstor.com:443\".",

View File

@@ -0,0 +1,182 @@
// Package api provides types used by the Quatrix API.
package api
import (
"strconv"
"time"
)
// OverwriteMode is a conflict resolution mode used during copy or move. Files with conflicting names will be overwritten
const OverwriteMode = "overwrite"
// ProfileInfo is a profile info about quota
type ProfileInfo struct {
UserUsed int64 `json:"user_used"`
UserLimit int64 `json:"user_limit"`
AccUsed int64 `json:"acc_used"`
AccLimit int64 `json:"acc_limit"`
}
// IDList is a general object that contains a list of IDs
type IDList struct {
IDs []string `json:"ids"`
}
// DeleteParams is the request to delete object
type DeleteParams struct {
IDs []string `json:"ids"`
DeletePermanently bool `json:"delete_permanently"`
}
// FileInfoParams is the request to get object's (file or directory) info
type FileInfoParams struct {
ParentID string `json:"parent_id,omitempty"`
Path string `json:"path"`
}
// FileInfo is the response to get object's (file or directory) info
type FileInfo struct {
FileID string `json:"file_id"`
ParentID string `json:"parent_id"`
Src string `json:"src"`
Type string `json:"type"`
}
// IsFile returns true if the object is a file,
// false otherwise
func (fi *FileInfo) IsFile() bool {
if fi == nil {
return false
}
return fi.Type == "F"
}
// IsDir returns true if the object is a directory,
// false otherwise
func (fi *FileInfo) IsDir() bool {
if fi == nil {
return false
}
return fi.Type == "D" || fi.Type == "S" || fi.Type == "T"
}
// CreateDirParams is the request to create a directory
type CreateDirParams struct {
Target string `json:"target,omitempty"`
Name string `json:"name"`
Resolve bool `json:"resolve"`
}
// File represents metadata about an object (file or directory) in Quatrix
type File struct {
ID string `json:"id"`
Created JSONTime `json:"created"`
Modified JSONTime `json:"modified"`
Name string `json:"name"`
ParentID string `json:"parent_id"`
Size int64 `json:"size"`
ModifiedMS JSONTime `json:"modified_ms"`
Type string `json:"type"`
Operations int `json:"operations"`
SubType string `json:"sub_type"`
Content []File `json:"content"`
}
// IsFile returns true if the object is a file,
// false otherwise
func (f *File) IsFile() bool {
if f == nil {
return false
}
return f.Type == "F"
}
// IsDir returns true if the object is a directory,
// false otherwise
func (f *File) IsDir() bool {
if f == nil {
return false
}
return f.Type == "D" || f.Type == "S" || f.Type == "T"
}
// SetMTimeParams is the request to set the modification time for an object
type SetMTimeParams struct {
ID string `json:"id,omitempty"`
MTime JSONTime `json:"mtime"`
}
// JSONTime provides methods to marshal/unmarshal time.Time as Unix time
type JSONTime time.Time
// MarshalJSON returns time representation in Unix time
func (u JSONTime) MarshalJSON() ([]byte, error) {
return []byte(strconv.FormatFloat(float64(time.Time(u).UTC().UnixNano())/1e9, 'f', 6, 64)), nil
}
// UnmarshalJSON sets time from Unix time representation
func (u *JSONTime) UnmarshalJSON(data []byte) error {
f, err := strconv.ParseFloat(string(data), 64)
if err != nil {
return err
}
t := JSONTime(time.Unix(0, int64(f*1e9)))
*u = t
return nil
}
// String returns Unix time representation of time as string
func (u JSONTime) String() string {
return strconv.FormatInt(time.Time(u).UTC().Unix(), 10)
}
// DownloadLinkResponse is the response to download-link request
type DownloadLinkResponse struct {
ID string `json:"id"`
}
// UploadLinkParams is the request to get upload-link
type UploadLinkParams struct {
Name string `json:"name"`
ParentID string `json:"parent_id"`
Resolve bool `json:"resolve"`
}
// UploadLinkResponse is the response to upload-link request
type UploadLinkResponse struct {
Name string `json:"name"`
FileID string `json:"file_id"`
ParentID string `json:"parent_id"`
UploadKey string `json:"upload_key"`
}
// UploadFinalizeResponse is the response to the finalize-file method
type UploadFinalizeResponse struct {
FileID string `json:"id"`
ParentID string `json:"parent_id"`
Modified int64 `json:"modified"`
FileSize int64 `json:"size"`
}
// FileModifyParams is the request to get a modify link for a file
type FileModifyParams struct {
ID string `json:"id"`
Truncate int64 `json:"truncate"`
}
// FileCopyMoveOneParams is the request to do a server-side copy or move.
// It can be used for a file or a directory
type FileCopyMoveOneParams struct {
ID string `json:"file_id"`
Target string `json:"target_id"`
Name string `json:"name"`
MTime JSONTime `json:"mtime"`
Resolve bool `json:"resolve"`
ResolveMode string `json:"resolve_mode"`
}
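
The JSONTime type above puts fractional Unix seconds on the wire while keeping time.Time semantics in Go. Below is a minimal sketch, not part of the diff, of how it round-trips through encoding/json; it assumes the package is importable as github.com/rclone/rclone/backend/quatrix/api (as the diff layout suggests), and the file ID is made up for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/rclone/rclone/backend/quatrix/api"
)

func main() {
	params := api.SetMTimeParams{
		ID:    "some-file-id", // hypothetical ID, for illustration only
		MTime: api.JSONTime(time.Unix(1700000000, 500000000)),
	}

	out, err := json.Marshal(params)
	if err != nil {
		panic(err)
	}
	// MarshalJSON writes the time as fractional Unix seconds, so this prints
	// something like: {"id":"some-file-id","mtime":1700000000.500000}
	fmt.Println(string(out))

	// UnmarshalJSON parses the float back into a time.Time.
	var back api.SetMTimeParams
	if err := json.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println(time.Time(back.MTime).UTC())
}
```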

backend/quatrix/quatrix.go (new file, 1256 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,17 @@
// Test Quatrix filesystem interface
package quatrix_test
import (
"testing"
"github.com/rclone/rclone/backend/quatrix"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestQuatrix:",
NilObject: (*quatrix.Object)(nil),
})
}


@@ -0,0 +1,108 @@
package quatrix
import (
"sync"
"time"
"github.com/rclone/rclone/fs"
)
// UploadMemoryManager dynamically calculates the size of every chunk in a transfer, increasing or decreasing it
// depending on the upload speed. This reduces the overall upload time, because faster transfers
// do not have to wait for the slower ones to finish uploading.
type UploadMemoryManager struct {
m sync.Mutex
useDynamicSize bool
shared int64
reserved int64
effectiveTime time.Duration
fileUsage map[string]int64
}
// NewUploadMemoryManager is a constructor for UploadMemoryManager
func NewUploadMemoryManager(ci *fs.ConfigInfo, opt *Options) *UploadMemoryManager {
useDynamicSize := true
sharedMemory := int64(opt.MaximalSummaryChunkSize) - int64(opt.MinimalChunkSize)*int64(ci.Transfers)
if sharedMemory <= 0 {
sharedMemory = 0
useDynamicSize = false
}
return &UploadMemoryManager{
useDynamicSize: useDynamicSize,
shared: sharedMemory,
reserved: int64(opt.MinimalChunkSize),
effectiveTime: time.Duration(opt.EffectiveUploadTime),
fileUsage: map[string]int64{},
}
}
// Consume decides how much memory the next chunk for fileID may consume, borrowing from the shared pool if needed
func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed float64) int64 {
if !u.useDynamicSize {
if neededMemory < u.reserved {
return neededMemory
}
return u.reserved
}
u.m.Lock()
defer u.m.Unlock()
borrowed, found := u.fileUsage[fileID]
if found {
u.shared += borrowed
borrowed = 0
}
defer func() { u.fileUsage[fileID] = borrowed }()
effectiveChunkSize := int64(speed * u.effectiveTime.Seconds())
if effectiveChunkSize < u.reserved {
effectiveChunkSize = u.reserved
}
if neededMemory < effectiveChunkSize {
effectiveChunkSize = neededMemory
}
if effectiveChunkSize <= u.reserved {
return effectiveChunkSize
}
toBorrow := effectiveChunkSize - u.reserved
if toBorrow <= u.shared {
u.shared -= toBorrow
borrowed = toBorrow
return effectiveChunkSize
}
borrowed = u.shared
u.shared = 0
return borrowed + u.reserved
}
// Return gives the memory borrowed for the previous chunk upload back to the shared pool
func (u *UploadMemoryManager) Return(fileID string) {
if !u.useDynamicSize {
return
}
u.m.Lock()
defer u.m.Unlock()
borrowed, found := u.fileUsage[fileID]
if !found {
return
}
u.shared += borrowed
delete(u.fileUsage, fileID)
}
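
The manager above keeps a reserved minimum per transfer plus a shared pool that fast transfers can borrow from. The following is a minimal sketch, not part of the diff, of how it hands out chunk sizes. It assumes the quatrix Options fields referenced by the constructor (MinimalChunkSize, MaximalSummaryChunkSize, EffectiveUploadTime) are fs.SizeSuffix / fs.Duration values, as the conversions above suggest, and the numbers are purely illustrative.

```go
package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/backend/quatrix"
	"github.com/rclone/rclone/fs"
)

func main() {
	ci := &fs.ConfigInfo{Transfers: 4}
	opt := &quatrix.Options{
		MinimalChunkSize:        fs.SizeSuffix(10 * 1024 * 1024),  // 10 MiB reserved per transfer
		MaximalSummaryChunkSize: fs.SizeSuffix(100 * 1024 * 1024), // 100 MiB across all transfers
		EffectiveUploadTime:     fs.Duration(4 * time.Second),
	}
	u := quatrix.NewUploadMemoryManager(ci, opt)

	// A fast transfer (25 MiB/s) borrows from the shared pool: with these
	// settings it gets its 10 MiB reservation plus the whole 60 MiB pool.
	fast := u.Consume("file-a", 1<<30, 25*1024*1024)
	// A slow transfer (1 MiB/s) stays at the reserved minimum of 10 MiB.
	slow := u.Consume("file-b", 1<<30, 1*1024*1024)
	fmt.Println("fast chunk:", fast, "slow chunk:", slow)

	// Return hands the borrowed memory back so other files can use it.
	u.Return("file-a")
	u.Return("file-b")
}
```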

File diff suppressed because it is too large.


@@ -12,6 +12,7 @@ import (
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
@@ -393,6 +394,41 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
}
})
t.Run("Mkdir", func(t *testing.T) {
// Test what happens when we create a bucket we already own and see whether the
// quirk is set correctly
req := s3.CreateBucketInput{
Bucket: &f.rootBucket,
ACL: stringPointerOrNil(f.opt.BucketACL),
}
if f.opt.LocationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
LocationConstraint: &f.opt.LocationConstraint,
}
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.CreateBucketWithContext(ctx, &req)
return f.shouldRetry(ctx, err)
})
var errString string
if err == nil {
errString = "No Error"
} else if awsErr, ok := err.(awserr.Error); ok {
errString = awsErr.Code()
} else {
assert.Fail(t, "Unknown error %T %v", err, err)
}
t.Logf("Creating a bucket we already have created returned code: %s", errString)
switch errString {
case "BucketAlreadyExists":
assert.False(t, f.opt.UseAlreadyExists.Value, "Need to clear UseAlreadyExists quirk")
case "No Error", "BucketAlreadyOwnedByYou":
assert.True(t, f.opt.UseAlreadyExists.Value, "Need to set UseAlreadyExists quirk")
default:
assert.Fail(t, "Unknown error string %q", errString)
}
})
t.Run("Cleanup", func(t *testing.T) {
require.NoError(t, f.CleanUpHidden(ctx))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
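
The Mkdir subtest above probes how the provider reacts to re-creating a bucket rclone already owns and checks that the UseAlreadyExists quirk matches. Below is a hedged sketch of that decision, not taken from the diff: wantUseAlreadyExists is a made-up helper name, and it assumes the same aws-sdk-go awserr package imported in the hunk above.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// wantUseAlreadyExists maps the CreateBucket outcome onto the quirk value.
func wantUseAlreadyExists(err error) (bool, error) {
	if err == nil {
		// The provider silently accepts re-creating a bucket we already own.
		return true, nil
	}
	var awsErr awserr.Error
	if errors.As(err, &awsErr) {
		switch awsErr.Code() {
		case "BucketAlreadyOwnedByYou":
			// Distinct error for "this bucket is yours", so it is safe to rely on.
			return true, nil
		case "BucketAlreadyExists":
			// The provider cannot tell our bucket from someone else's, so the
			// quirk must stay off and Mkdir has to tolerate this error itself.
			return false, nil
		}
	}
	return false, fmt.Errorf("unexpected CreateBucket error: %w", err)
}

func main() {
	ok, err := wantUseAlreadyExists(awserr.New("BucketAlreadyOwnedByYou", "owned", nil))
	fmt.Println(ok, err) // true <nil>
}
```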


@@ -47,4 +47,12 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
// SetCopyCutoff sets the copy cutoff - used by the integration tests
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)

Some files were not shown because too many files have changed in this diff.